python_code: string (0 to 1.8M chars) | repo_name: string (7 distinct values) | file_path: string (5 to 99 chars)
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bobby Eshleman <[email protected]>
*
* Based on net/unix/unix_bpf.c
*/
#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/skmsg.h>
#include <linux/socket.h>
#include <linux/wait.h>
#include <net/af_vsock.h>
#include <net/sock.h>
#define vsock_sk_has_data(__sk, __psock) \
({ !skb_queue_empty(&(__sk)->sk_receive_queue) || \
!skb_queue_empty(&(__psock)->ingress_skb) || \
!list_empty(&(__psock)->ingress_msg); \
})
static struct proto *vsock_prot_saved __read_mostly;
static DEFINE_SPINLOCK(vsock_prot_lock);
static struct proto vsock_bpf_prot;
static bool vsock_has_data(struct sock *sk, struct sk_psock *psock)
{
struct vsock_sock *vsk = vsock_sk(sk);
s64 ret;
ret = vsock_connectible_has_data(vsk);
if (ret > 0)
return true;
return vsock_sk_has_data(sk, psock);
}
static bool vsock_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
{
bool ret;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
if (sk->sk_shutdown & RCV_SHUTDOWN)
return true;
if (!timeo)
return false;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
ret = vsock_has_data(sk, psock);
if (!ret) {
wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
ret = vsock_has_data(sk, psock);
}
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
return ret;
}
static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags)
{
struct socket *sock = sk->sk_socket;
int err;
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
err = vsock_connectible_recvmsg(sock, msg, len, flags);
else if (sk->sk_type == SOCK_DGRAM)
err = vsock_dgram_recvmsg(sock, msg, len, flags);
else
err = -EPROTOTYPE;
return err;
}
static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len)
{
struct sk_psock *psock;
int copied;
psock = sk_psock_get(sk);
if (unlikely(!psock))
return __vsock_recvmsg(sk, msg, len, flags);
lock_sock(sk);
if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
release_sock(sk);
sk_psock_put(sk, psock);
return __vsock_recvmsg(sk, msg, len, flags);
}
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
while (copied == 0) {
long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
if (!vsock_msg_wait_data(sk, psock, timeo)) {
copied = -EAGAIN;
break;
}
if (sk_psock_queue_empty(psock)) {
release_sock(sk);
sk_psock_put(sk, psock);
return __vsock_recvmsg(sk, msg, len, flags);
}
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
}
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
}
/* Copy of original proto with updated sock_map methods */
static struct proto vsock_bpf_prot = {
.close = sock_map_close,
.recvmsg = vsock_bpf_recvmsg,
.sock_is_readable = sk_msg_is_readable,
.unhash = sock_map_unhash,
};
static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = vsock_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
}
static void vsock_bpf_check_needs_rebuild(struct proto *ops)
{
/* Paired with the smp_store_release() below. */
if (unlikely(ops != smp_load_acquire(&vsock_prot_saved))) {
spin_lock_bh(&vsock_prot_lock);
if (likely(ops != vsock_prot_saved)) {
vsock_bpf_rebuild_protos(&vsock_bpf_prot, ops);
/* Make sure proto function pointers are updated before publishing the
* pointer to the struct.
*/
smp_store_release(&vsock_prot_saved, ops);
}
spin_unlock_bh(&vsock_prot_lock);
}
}
int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
struct vsock_sock *vsk;
if (restore) {
sk->sk_write_space = psock->saved_write_space;
sock_replace_proto(sk, psock->sk_proto);
return 0;
}
vsk = vsock_sk(sk);
if (!vsk->transport)
return -ENODEV;
if (!vsk->transport->read_skb)
return -EOPNOTSUPP;
vsock_bpf_check_needs_rebuild(psock->sk_proto);
sock_replace_proto(sk, &vsock_bpf_prot);
return 0;
}
void __init vsock_bpf_build_proto(void)
{
vsock_bpf_rebuild_protos(&vsock_bpf_prot, &vsock_proto);
}
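/* Editorial usage sketch (not part of the original kernel file): the proto
 * switch installed by vsock_bpf_update_proto() is what lets user space place
 * a connected vsock socket into a BPF sockmap. A minimal sketch, assuming
 * libbpf and an already-loaded BPF_MAP_TYPE_SOCKMAP whose fd is 'map_fd'
 * (the names 'map_fd', 'key' and 'addr' are hypothetical):
 *
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 2,		// host CID, for example
 *		.svm_port = 1234,
 *	};
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	int key = 0;
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	if (bpf_map_update_elem(map_fd, &key, &fd, BPF_ANY))
 *		perror("bpf_map_update_elem");
 *
 * Once the map update succeeds, recvmsg() on 'fd' is served by
 * vsock_bpf_recvmsg() above.
 */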
| linux-master | net/vmw_vsock/vsock_bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* common code for virtio vsock
*
* Copyright (C) 2013-2015 Red Hat, Inc.
* Author: Asias He <[email protected]>
* Stefan Hajnoczi <[email protected]>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>
/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
const struct vsock_transport *t = vsock_core_get_transport(vsk);
if (WARN_ON(!t))
return NULL;
return container_of(t, struct virtio_transport, transport);
}
/* Returns a new packet on success, otherwise returns NULL.
*
* If NULL is returned, the packet could not be allocated or filled.
*/
static struct sk_buff *
virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
size_t len,
u32 src_cid,
u32 src_port,
u32 dst_cid,
u32 dst_port)
{
const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
void *payload;
int err;
skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
if (!skb)
return NULL;
hdr = virtio_vsock_hdr(skb);
hdr->type = cpu_to_le16(info->type);
hdr->op = cpu_to_le16(info->op);
hdr->src_cid = cpu_to_le64(src_cid);
hdr->dst_cid = cpu_to_le64(dst_cid);
hdr->src_port = cpu_to_le32(src_port);
hdr->dst_port = cpu_to_le32(dst_port);
hdr->flags = cpu_to_le32(info->flags);
hdr->len = cpu_to_le32(len);
if (info->msg && len > 0) {
payload = skb_put(skb, len);
err = memcpy_from_msg(payload, info->msg, len);
if (err)
goto out;
if (msg_data_left(info->msg) == 0 &&
info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
if (info->msg->msg_flags & MSG_EOR)
hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
}
}
if (info->reply)
virtio_vsock_skb_set_reply(skb);
trace_virtio_transport_alloc_pkt(src_cid, src_port,
dst_cid, dst_port,
len,
info->type,
info->op,
info->flags);
if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
goto out;
}
return skb;
out:
kfree_skb(skb);
return NULL;
}
/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
struct virtio_vsock_hdr *pkt_hdr;
struct sk_buff *pkt = opaque;
struct af_vsockmon_hdr *hdr;
struct sk_buff *skb;
size_t payload_len;
void *payload_buf;
/* A packet could be split to fit the RX buffer, so we can retrieve
* the payload length from the header and the buffer pointer taking
* care of the offset in the original packet.
*/
pkt_hdr = virtio_vsock_hdr(pkt);
payload_len = pkt->len;
payload_buf = pkt->data;
skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
GFP_ATOMIC);
if (!skb)
return NULL;
hdr = skb_put(skb, sizeof(*hdr));
/* pkt->hdr is little-endian so no need to byteswap here */
hdr->src_cid = pkt_hdr->src_cid;
hdr->src_port = pkt_hdr->src_port;
hdr->dst_cid = pkt_hdr->dst_cid;
hdr->dst_port = pkt_hdr->dst_port;
hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
memset(hdr->reserved, 0, sizeof(hdr->reserved));
switch (le16_to_cpu(pkt_hdr->op)) {
case VIRTIO_VSOCK_OP_REQUEST:
case VIRTIO_VSOCK_OP_RESPONSE:
hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
break;
case VIRTIO_VSOCK_OP_RST:
case VIRTIO_VSOCK_OP_SHUTDOWN:
hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
break;
case VIRTIO_VSOCK_OP_RW:
hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
break;
case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
break;
default:
hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
break;
}
skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
if (payload_len) {
skb_put_data(skb, payload_buf, payload_len);
}
return skb;
}
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
if (virtio_vsock_skb_tap_delivered(skb))
return;
vsock_deliver_tap(virtio_transport_build_skb, skb);
virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
static u16 virtio_transport_get_type(struct sock *sk)
{
if (sk->sk_type == SOCK_STREAM)
return VIRTIO_VSOCK_TYPE_STREAM;
else
return VIRTIO_VSOCK_TYPE_SEQPACKET;
}
/* This function can only be used on connecting/connected sockets,
* since a socket assigned to a transport is required.
*
* Do not use on listener sockets!
*/
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info *info)
{
u32 src_cid, src_port, dst_cid, dst_port;
const struct virtio_transport *t_ops;
struct virtio_vsock_sock *vvs;
u32 pkt_len = info->pkt_len;
u32 rest_len;
int ret;
info->type = virtio_transport_get_type(sk_vsock(vsk));
t_ops = virtio_transport_get_ops(vsk);
if (unlikely(!t_ops))
return -EFAULT;
src_cid = t_ops->transport.get_local_cid();
src_port = vsk->local_addr.svm_port;
if (!info->remote_cid) {
dst_cid = vsk->remote_addr.svm_cid;
dst_port = vsk->remote_addr.svm_port;
} else {
dst_cid = info->remote_cid;
dst_port = info->remote_port;
}
vvs = vsk->trans;
/* virtio_transport_get_credit might return less than pkt_len credit */
pkt_len = virtio_transport_get_credit(vvs, pkt_len);
/* Do not send zero length OP_RW pkt */
if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
return pkt_len;
rest_len = pkt_len;
do {
struct sk_buff *skb;
size_t skb_len;
skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, rest_len);
skb = virtio_transport_alloc_skb(info, skb_len,
src_cid, src_port,
dst_cid, dst_port);
if (!skb) {
ret = -ENOMEM;
break;
}
virtio_transport_inc_tx_pkt(vvs, skb);
ret = t_ops->send_pkt(skb);
if (ret < 0)
break;
/* Both virtio and vhost 'send_pkt()' return 'skb_len',
* but for reliability use 'ret' instead of 'skb_len'.
* Also, if a partial send somehow happens (i.e. 'ret' != 'skb_len'),
* we break out of this loop, but still account the returned
* value in 'virtio_transport_put_credit()'.
*/
rest_len -= ret;
if (WARN_ONCE(ret != skb_len,
"'send_pkt()' returns %i, but %zu expected\n",
ret, skb_len))
break;
} while (rest_len);
virtio_transport_put_credit(vvs, rest_len);
/* Return number of bytes, if any data has been sent. */
if (rest_len != pkt_len)
ret = pkt_len - rest_len;
return ret;
}
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
u32 len)
{
if (vvs->rx_bytes + len > vvs->buf_alloc)
return false;
vvs->rx_bytes += len;
return true;
}
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
u32 len)
{
vvs->rx_bytes -= len;
vvs->fwd_cnt += len;
}
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
spin_lock_bh(&vvs->rx_lock);
vvs->last_fwd_cnt = vvs->fwd_cnt;
hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
u32 ret;
if (!credit)
return 0;
spin_lock_bh(&vvs->tx_lock);
ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (ret > credit)
ret = credit;
vvs->tx_cnt += ret;
spin_unlock_bh(&vvs->tx_lock);
return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
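/* Worked example for virtio_transport_get_credit() above (editorial): if the
 * peer advertised peer_buf_alloc = 65536 and we have sent tx_cnt = 60000
 * bytes, of which the peer reports having forwarded peer_fwd_cnt = 20000,
 * then 65536 - (60000 - 20000) = 25536 bytes of credit remain, so a request
 * for 30000 bytes is trimmed to 25536.
 */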
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
if (!credit)
return;
spin_lock_bh(&vvs->tx_lock);
vvs->tx_cnt -= credit;
spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
.vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
}
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
struct sk_buff *skb;
size_t total = 0;
int err;
spin_lock_bh(&vvs->rx_lock);
skb_queue_walk(&vvs->rx_queue, skb) {
size_t bytes;
bytes = len - total;
if (bytes > skb->len)
bytes = skb->len;
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
err = memcpy_to_msg(msg, skb->data, bytes);
if (err)
goto out;
total += bytes;
spin_lock_bh(&vvs->rx_lock);
if (total == len)
break;
}
spin_unlock_bh(&vvs->rx_lock);
return total;
out:
if (total)
err = total;
return err;
}
static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
size_t bytes, total = 0;
struct sk_buff *skb;
int err = -EFAULT;
u32 free_space;
spin_lock_bh(&vvs->rx_lock);
if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes,
"rx_queue is empty, but rx_bytes is non-zero\n")) {
spin_unlock_bh(&vvs->rx_lock);
return err;
}
while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
skb = skb_peek(&vvs->rx_queue);
bytes = len - total;
if (bytes > skb->len)
bytes = skb->len;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
err = memcpy_to_msg(msg, skb->data, bytes);
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
skb_pull(skb, bytes);
if (skb->len == 0) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
__skb_unlink(skb, &vvs->rx_queue);
consume_skb(skb);
}
}
free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
spin_unlock_bh(&vvs->rx_lock);
/* To reduce the number of credit update messages,
* don't update credits as long as lots of space is available.
* Note: the limit chosen here is arbitrary. Setting the limit
* too high causes extra messages. Too low causes transmitter
* stalls. As stalls are in theory more expensive than extra
* messages, we set the limit to a high value. TODO: experiment
* with different values.
*/
if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
virtio_transport_send_credit_update(vsk);
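	/* Worked example for the check above (editorial), assuming the default
	 * 256 KiB buffer: with buf_alloc = 262144 and
	 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE = 65536, a credit update is sent only
	 * once fwd_cnt - last_fwd_cnt exceeds 196608 bytes, i.e. once
	 * free_space drops below 64 KiB.
	 */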
return total;
out:
if (total)
err = total;
return err;
}
static ssize_t
virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk,
struct msghdr *msg)
{
struct virtio_vsock_sock *vvs = vsk->trans;
struct sk_buff *skb;
size_t total, len;
spin_lock_bh(&vvs->rx_lock);
if (!vvs->msg_count) {
spin_unlock_bh(&vvs->rx_lock);
return 0;
}
total = 0;
len = msg_data_left(msg);
skb_queue_walk(&vvs->rx_queue, skb) {
struct virtio_vsock_hdr *hdr;
if (total < len) {
size_t bytes;
int err;
bytes = len - total;
if (bytes > skb->len)
bytes = skb->len;
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
err = memcpy_to_msg(msg, skb->data, bytes);
if (err)
return err;
spin_lock_bh(&vvs->rx_lock);
}
total += skb->len;
hdr = virtio_vsock_hdr(skb);
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
msg->msg_flags |= MSG_EOR;
break;
}
}
spin_unlock_bh(&vvs->rx_lock);
return total;
}
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
int flags)
{
struct virtio_vsock_sock *vvs = vsk->trans;
int dequeued_len = 0;
size_t user_buf_len = msg_data_left(msg);
bool msg_ready = false;
struct sk_buff *skb;
spin_lock_bh(&vvs->rx_lock);
if (vvs->msg_count == 0) {
spin_unlock_bh(&vvs->rx_lock);
return 0;
}
while (!msg_ready) {
struct virtio_vsock_hdr *hdr;
size_t pkt_len;
skb = __skb_dequeue(&vvs->rx_queue);
if (!skb)
break;
hdr = virtio_vsock_hdr(skb);
pkt_len = (size_t)le32_to_cpu(hdr->len);
if (dequeued_len >= 0) {
size_t bytes_to_copy;
bytes_to_copy = min(user_buf_len, pkt_len);
if (bytes_to_copy) {
int err;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
*/
dequeued_len = err;
} else {
user_buf_len -= bytes_to_copy;
}
spin_lock_bh(&vvs->rx_lock);
}
if (dequeued_len >= 0)
dequeued_len += pkt_len;
}
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
msg_ready = true;
vvs->msg_count--;
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
msg->msg_flags |= MSG_EOR;
}
virtio_transport_dec_rx_pkt(vvs, pkt_len);
kfree_skb(skb);
}
spin_unlock_bh(&vvs->rx_lock);
virtio_transport_send_credit_update(vsk);
return dequeued_len;
}
ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags)
{
if (flags & MSG_PEEK)
return virtio_transport_stream_do_peek(vsk, msg, len);
else
return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
int flags)
{
if (flags & MSG_PEEK)
return virtio_transport_seqpacket_do_peek(vsk, msg);
else
return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
spin_lock_bh(&vvs->tx_lock);
if (len > vvs->peer_buf_alloc) {
spin_unlock_bh(&vvs->tx_lock);
return -EMSGSIZE;
}
spin_unlock_bh(&vvs->tx_lock);
return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
spin_lock_bh(&vvs->rx_lock);
bytes = vvs->rx_bytes;
spin_unlock_bh(&vvs->rx_lock);
return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
u32 msg_count;
spin_lock_bh(&vvs->rx_lock);
msg_count = vvs->msg_count;
spin_unlock_bh(&vvs->rx_lock);
return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (bytes < 0)
bytes = 0;
return bytes;
}
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
spin_lock_bh(&vvs->tx_lock);
bytes = virtio_transport_has_space(vsk);
spin_unlock_bh(&vvs->tx_lock);
return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk)
{
struct virtio_vsock_sock *vvs;
vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
if (!vvs)
return -ENOMEM;
vsk->trans = vvs;
vvs->vsk = vsk;
if (psk && psk->trans) {
struct virtio_vsock_sock *ptrans = psk->trans;
vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
}
if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;
vvs->buf_alloc = vsk->buffer_size;
spin_lock_init(&vvs->rx_lock);
spin_lock_init(&vvs->tx_lock);
skb_queue_head_init(&vvs->rx_queue);
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
struct virtio_vsock_sock *vvs = vsk->trans;
if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
*val = VIRTIO_VSOCK_MAX_BUF_SIZE;
vvs->buf_alloc = *val;
virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
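/* Editorial sketch: this callback is reached when user space resizes the
 * socket buffer, roughly as below (error handling omitted; the socket-option
 * level for vsock is AF_VSOCK):
 *
 *	unsigned long long size = 1024 * 1024;
 *
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		   &size, sizeof(size));
 *
 * Requests above VIRTIO_VSOCK_MAX_BUF_SIZE are clamped as shown above.
 */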
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
size_t target,
bool *data_ready_now)
{
*data_ready_now = vsock_stream_has_data(vsk) >= target;
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
size_t target,
bool *space_avail_now)
{
s64 free_space;
free_space = vsock_stream_has_space(vsk);
if (free_space > 0)
*space_avail_now = true;
else if (free_space == 0)
*space_avail_now = false;
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
size_t target, struct vsock_transport_recv_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
size_t target, struct vsock_transport_recv_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
size_t target, struct vsock_transport_recv_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
size_t target, ssize_t copied, bool data_read,
struct vsock_transport_recv_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
ssize_t written, struct vsock_transport_send_notify_data *data)
{
return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
bool virtio_transport_stream_allow(u32 cid, u32 port)
{
return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
int virtio_transport_connect(struct vsock_sock *vsk)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_REQUEST,
.vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);
int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_SHUTDOWN,
.flags = (mode & RCV_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
(mode & SEND_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
.vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
struct sockaddr_vm *remote_addr,
struct msghdr *msg,
size_t dgram_len)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RW,
.msg = msg,
.pkt_len = len,
.vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
void virtio_transport_destruct(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);
static int virtio_transport_reset(struct vsock_sock *vsk,
struct sk_buff *skb)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.reply = !!skb,
.vsk = vsk,
};
/* Send RST only if the original pkt is not a RST pkt */
if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
return 0;
return virtio_transport_send_pkt_info(vsk, &info);
}
/* Normally packets are associated with a socket. There may be no socket if an
* attempt was made to connect to a socket that does not exist.
*/
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.type = le16_to_cpu(hdr->type),
.reply = true,
};
struct sk_buff *reply;
/* Send RST only if the original pkt is not a RST pkt */
if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
return 0;
if (!t)
return -ENOTCONN;
reply = virtio_transport_alloc_skb(&info, 0,
le64_to_cpu(hdr->dst_cid),
le32_to_cpu(hdr->dst_port),
le64_to_cpu(hdr->src_cid),
le32_to_cpu(hdr->src_port));
if (!reply)
return -ENOMEM;
return t->send_pkt(reply);
}
/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
/* We don't need to take rx_lock, as the socket is closing and we are
* removing it.
*/
__skb_queue_purge(&vvs->rx_queue);
vsock_remove_sock(vsk);
}
static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
if (timeout) {
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(sk_sleep(sk), &wait);
do {
if (sk_wait_event(sk, &timeout,
sock_flag(sk, SOCK_DONE), &wait))
break;
} while (!signal_pending(current) && timeout);
remove_wait_queue(sk_sleep(sk), &wait);
}
}
static void virtio_transport_do_close(struct vsock_sock *vsk,
bool cancel_timeout)
{
struct sock *sk = sk_vsock(vsk);
sock_set_flag(sk, SOCK_DONE);
vsk->peer_shutdown = SHUTDOWN_MASK;
if (vsock_stream_has_data(vsk) <= 0)
sk->sk_state = TCP_CLOSING;
sk->sk_state_change(sk);
if (vsk->close_work_scheduled &&
(!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
vsk->close_work_scheduled = false;
virtio_transport_remove_sock(vsk);
/* Release refcnt obtained when we scheduled the timeout */
sock_put(sk);
}
}
static void virtio_transport_close_timeout(struct work_struct *work)
{
struct vsock_sock *vsk =
container_of(work, struct vsock_sock, close_work.work);
struct sock *sk = sk_vsock(vsk);
sock_hold(sk);
lock_sock(sk);
if (!sock_flag(sk, SOCK_DONE)) {
(void)virtio_transport_reset(vsk, NULL);
virtio_transport_do_close(vsk, false);
}
vsk->close_work_scheduled = false;
release_sock(sk);
sock_put(sk);
}
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
struct sock *sk = &vsk->sk;
if (!(sk->sk_state == TCP_ESTABLISHED ||
sk->sk_state == TCP_CLOSING))
return true;
/* Already received SHUTDOWN from peer, reply with RST */
if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
(void)virtio_transport_reset(vsk, NULL);
return true;
}
if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
virtio_transport_wait_close(sk, sk->sk_lingertime);
if (sock_flag(sk, SOCK_DONE)) {
return true;
}
sock_hold(sk);
INIT_DELAYED_WORK(&vsk->close_work,
virtio_transport_close_timeout);
vsk->close_work_scheduled = true;
schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
return false;
}
void virtio_transport_release(struct vsock_sock *vsk)
{
struct sock *sk = &vsk->sk;
bool remove_sock = true;
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
remove_sock = virtio_transport_close(vsk);
if (remove_sock) {
sock_set_flag(sk, SOCK_DONE);
virtio_transport_remove_sock(vsk);
}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);
static int
virtio_transport_recv_connecting(struct sock *sk,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
int skerr;
int err;
switch (le16_to_cpu(hdr->op)) {
case VIRTIO_VSOCK_OP_RESPONSE:
sk->sk_state = TCP_ESTABLISHED;
sk->sk_socket->state = SS_CONNECTED;
vsock_insert_connected(vsk);
sk->sk_state_change(sk);
break;
case VIRTIO_VSOCK_OP_INVALID:
break;
case VIRTIO_VSOCK_OP_RST:
skerr = ECONNRESET;
err = 0;
goto destroy;
default:
skerr = EPROTO;
err = -EINVAL;
goto destroy;
}
return 0;
destroy:
virtio_transport_reset(vsk, skb);
sk->sk_state = TCP_CLOSE;
sk->sk_err = skerr;
sk_error_report(sk);
return err;
}
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
struct sk_buff *skb)
{
struct virtio_vsock_sock *vvs = vsk->trans;
bool can_enqueue, free_pkt = false;
struct virtio_vsock_hdr *hdr;
u32 len;
hdr = virtio_vsock_hdr(skb);
len = le32_to_cpu(hdr->len);
spin_lock_bh(&vvs->rx_lock);
can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
if (!can_enqueue) {
free_pkt = true;
goto out;
}
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
vvs->msg_count++;
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
* payload.
*/
if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
struct virtio_vsock_hdr *last_hdr;
struct sk_buff *last_skb;
last_skb = skb_peek_tail(&vvs->rx_queue);
last_hdr = virtio_vsock_hdr(last_skb);
/* If there is space in the last packet queued, we copy the
* new packet in its buffer. We avoid this if the last packet
* queued has VIRTIO_VSOCK_SEQ_EOM set, because that flag marks the
* end of a SEQPACKET message, so 'skb' is the first packet of a
* new message.
*/
if (skb->len < skb_tailroom(last_skb) &&
!(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
free_pkt = true;
last_hdr->flags |= hdr->flags;
le32_add_cpu(&last_hdr->len, len);
goto out;
}
}
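	/* Worked example for the coalescing above (editorial): with
	 * GOOD_COPY_LEN = 128, an 80-byte RW packet arriving while the last
	 * queued skb still has tailroom (and is not an EOM boundary) is
	 * memcpy()'d into that skb and its own skb freed, instead of
	 * occupying a whole receive entry of its own.
	 */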
__skb_queue_tail(&vvs->rx_queue, skb);
out:
spin_unlock_bh(&vvs->rx_lock);
if (free_pkt)
kfree_skb(skb);
}
static int
virtio_transport_recv_connected(struct sock *sk,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
int err = 0;
switch (le16_to_cpu(hdr->op)) {
case VIRTIO_VSOCK_OP_RW:
virtio_transport_recv_enqueue(vsk, skb);
vsock_data_ready(sk);
return err;
case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
virtio_transport_send_credit_update(vsk);
break;
case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
sk->sk_write_space(sk);
break;
case VIRTIO_VSOCK_OP_SHUTDOWN:
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
vsk->peer_shutdown |= RCV_SHUTDOWN;
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
vsk->peer_shutdown |= SEND_SHUTDOWN;
if (vsk->peer_shutdown == SHUTDOWN_MASK &&
vsock_stream_has_data(vsk) <= 0 &&
!sock_flag(sk, SOCK_DONE)) {
(void)virtio_transport_reset(vsk, NULL);
virtio_transport_do_close(vsk, true);
}
if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
sk->sk_state_change(sk);
break;
case VIRTIO_VSOCK_OP_RST:
virtio_transport_do_close(vsk, true);
break;
default:
err = -EINVAL;
break;
}
kfree_skb(skb);
return err;
}
static void
virtio_transport_recv_disconnecting(struct sock *sk,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
virtio_transport_do_close(vsk, true);
}
static int
virtio_transport_send_response(struct vsock_sock *vsk,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RESPONSE,
.remote_cid = le64_to_cpu(hdr->src_cid),
.remote_port = le32_to_cpu(hdr->src_port),
.reply = true,
.vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
}
static bool virtio_transport_space_update(struct sock *sk,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
struct virtio_vsock_sock *vvs = vsk->trans;
bool space_available;
/* Listener sockets are not associated with any transport, so we are
* not able to check its state to see if there is space available in the
* remote peer, but since they are only used to receive requests, we
* can assume that there is always space available in the other peer.
*/
if (!vvs)
return true;
/* buf_alloc and fwd_cnt are always included in the hdr */
spin_lock_bh(&vvs->tx_lock);
vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
space_available = virtio_transport_has_space(vsk);
spin_unlock_bh(&vvs->tx_lock);
return space_available;
}
/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
struct virtio_transport *t)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vsock_sock *vsk = vsock_sk(sk);
struct vsock_sock *vchild;
struct sock *child;
int ret;
if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
virtio_transport_reset_no_sock(t, skb);
return -EINVAL;
}
if (sk_acceptq_is_full(sk)) {
virtio_transport_reset_no_sock(t, skb);
return -ENOMEM;
}
child = vsock_create_connected(sk);
if (!child) {
virtio_transport_reset_no_sock(t, skb);
return -ENOMEM;
}
sk_acceptq_added(sk);
lock_sock_nested(child, SINGLE_DEPTH_NESTING);
child->sk_state = TCP_ESTABLISHED;
vchild = vsock_sk(child);
vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
le32_to_cpu(hdr->dst_port));
vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
le32_to_cpu(hdr->src_port));
ret = vsock_assign_transport(vchild, vsk);
/* The transport assigned (based on remote_addr) must be the same
* one on which we received the request.
*/
if (ret || vchild->transport != &t->transport) {
release_sock(child);
virtio_transport_reset_no_sock(t, skb);
sock_put(child);
return ret;
}
if (virtio_transport_space_update(child, skb))
child->sk_write_space(child);
vsock_insert_connected(vchild);
vsock_enqueue_accept(sk, child);
virtio_transport_send_response(vchild, skb);
release_sock(child);
sk->sk_data_ready(sk);
return 0;
}
static bool virtio_transport_valid_type(u16 type)
{
return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
(type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
void virtio_transport_recv_pkt(struct virtio_transport *t,
struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
struct sock *sk;
bool space_available;
vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
le32_to_cpu(hdr->src_port));
vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
le32_to_cpu(hdr->dst_port));
trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
dst.svm_cid, dst.svm_port,
le32_to_cpu(hdr->len),
le16_to_cpu(hdr->type),
le16_to_cpu(hdr->op),
le32_to_cpu(hdr->flags),
le32_to_cpu(hdr->buf_alloc),
le32_to_cpu(hdr->fwd_cnt));
if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
(void)virtio_transport_reset_no_sock(t, skb);
goto free_pkt;
}
/* The socket must be in the connected or bound table,
* otherwise send a reset back
*/
sk = vsock_find_connected_socket(&src, &dst);
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
(void)virtio_transport_reset_no_sock(t, skb);
goto free_pkt;
}
}
if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
(void)virtio_transport_reset_no_sock(t, skb);
sock_put(sk);
goto free_pkt;
}
if (!skb_set_owner_sk_safe(skb, sk)) {
WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
goto free_pkt;
}
vsk = vsock_sk(sk);
lock_sock(sk);
/* Check if sk has been closed before lock_sock */
if (sock_flag(sk, SOCK_DONE)) {
(void)virtio_transport_reset_no_sock(t, skb);
release_sock(sk);
sock_put(sk);
goto free_pkt;
}
space_available = virtio_transport_space_update(sk, skb);
/* Update CID in case it has changed after a transport reset event */
if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
vsk->local_addr.svm_cid = dst.svm_cid;
if (space_available)
sk->sk_write_space(sk);
switch (sk->sk_state) {
case TCP_LISTEN:
virtio_transport_recv_listen(sk, skb, t);
kfree_skb(skb);
break;
case TCP_SYN_SENT:
virtio_transport_recv_connecting(sk, skb);
kfree_skb(skb);
break;
case TCP_ESTABLISHED:
virtio_transport_recv_connected(sk, skb);
break;
case TCP_CLOSING:
virtio_transport_recv_disconnecting(sk, skb);
kfree_skb(skb);
break;
default:
(void)virtio_transport_reset_no_sock(t, skb);
kfree_skb(skb);
break;
}
release_sock(sk);
/* Release refcnt obtained when we fetched this socket out of the
* bound or connected list.
*/
sock_put(sk);
return;
free_pkt:
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
/* Remove from a queue all skbs whose owning vsk matches the given one.
*
* Each skb is freed.
*
* Returns the count of skbs that were reply packets.
*/
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
struct sk_buff_head freeme;
struct sk_buff *skb, *tmp;
int cnt = 0;
skb_queue_head_init(&freeme);
spin_lock_bh(&queue->lock);
skb_queue_walk_safe(queue, skb, tmp) {
if (vsock_sk(skb->sk) != vsk)
continue;
__skb_unlink(skb, queue);
__skb_queue_tail(&freeme, skb);
if (virtio_vsock_skb_reply(skb))
cnt++;
}
spin_unlock_bh(&queue->lock);
__skb_queue_purge(&freeme);
return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
{
struct virtio_vsock_sock *vvs = vsk->trans;
struct sock *sk = sk_vsock(vsk);
struct sk_buff *skb;
int off = 0;
int err;
spin_lock_bh(&vvs->rx_lock);
/* Use __skb_recv_datagram() for race-free handling of the receive. It
* works for types other than dgrams.
*/
skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
spin_unlock_bh(&vvs->rx_lock);
if (!skb)
return err;
return recv_actor(sk, skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");
| linux-master | net/vmw_vsock/virtio_transport_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010, Intel Corporation.
*
* Author: John Fastabend <[email protected]>
*/
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <net/dcbevent.h>
static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
int register_dcbevent_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&dcbevent_notif_chain, nb);
}
EXPORT_SYMBOL(register_dcbevent_notifier);
int unregister_dcbevent_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&dcbevent_notif_chain, nb);
}
EXPORT_SYMBOL(unregister_dcbevent_notifier);
int call_dcbevent_notifiers(unsigned long val, void *v)
{
return atomic_notifier_call_chain(&dcbevent_notif_chain, val, v);
}
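/* Editorial sketch (not part of the original file): a consumer, e.g. a
 * converged-Ethernet driver, would hook these events roughly as follows,
 * where my_dcb_event() and my_dcb_nb are hypothetical names:
 *
 *	static int my_dcb_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_dcb_nb = {
 *		.notifier_call = my_dcb_event,
 *	};
 *
 *	register_dcbevent_notifier(&my_dcb_nb);
 */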
| linux-master | net/dcb/dcbevent.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2011, Intel Corporation.
*
* Description: Data Center Bridging netlink interface
* Author: Lucy Liu <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>
/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
* intended to allow network traffic with differing requirements
* (highly reliable, no drops vs. best effort vs. low latency) to operate
* and co-exist on Ethernet. Current DCB features are:
*
* Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
* framework for assigning bandwidth guarantees to traffic classes.
*
* Priority-based Flow Control (PFC) - provides a flow control mechanism which
* can work independently for each 802.1p priority.
*
* Congestion Notification - provides a mechanism for end-to-end congestion
* control for protocols which do not have built-in congestion management.
*
* More information about the emerging standards for these Ethernet features
* can be found at: http://www.ieee802.org/1/pages/dcbridges.html
*
* This file implements an rtnetlink interface to allow configuration of DCB
* features for capable devices.
*/
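/* Editorial usage note: from user space this interface is typically driven
 * through iproute2's 'dcb' tool (or lldpad/dcbtool for the CEE commands),
 * for example:
 *
 *	dcb ets show dev eth0
 *	dcb pfc show dev eth0
 *	dcb app show dev eth0
 *
 * Which subcommands actually work depends on the iproute2 version and on
 * the dcbnl_rtnl_ops callbacks the driver implements.
 */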
/**************** DCB attribute policies *************************************/
/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
[DCB_ATTR_STATE] = {.type = NLA_U8},
[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
[DCB_ATTR_CAP] = {.type = NLA_NESTED},
[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
[DCB_ATTR_BCN] = {.type = NLA_NESTED},
[DCB_ATTR_APP] = {.type = NLA_NESTED},
[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
[DCB_ATTR_DCBX] = {.type = NLA_U8},
[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
[DCB_BCN_ATTR_W] = {.type = NLA_U32},
[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
[DCB_BCN_ATTR_C] = {.type = NLA_U32},
[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
[DCB_APP_ATTR_ID] = {.type = NLA_U16},
[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
[DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
};
/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
static LIST_HEAD(dcb_app_list);
static LIST_HEAD(dcb_rewr_list);
static DEFINE_SPINLOCK(dcb_lock);
static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
{
switch (selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
case IEEE_8021QAZ_APP_SEL_STREAM:
case IEEE_8021QAZ_APP_SEL_DGRAM:
case IEEE_8021QAZ_APP_SEL_ANY:
case IEEE_8021QAZ_APP_SEL_DSCP:
return DCB_ATTR_IEEE_APP;
case DCB_APP_SEL_PCP:
return DCB_ATTR_DCB_APP;
default:
return DCB_ATTR_IEEE_APP_UNSPEC;
}
}
static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type)
{
switch (type) {
case DCB_ATTR_IEEE_APP:
case DCB_ATTR_DCB_APP:
return true;
default:
return false;
}
}
static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector)
{
return dcbnl_app_attr_type_get(selector) == type;
}
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
u32 flags, struct nlmsghdr **nlhp)
{
struct sk_buff *skb;
struct dcbmsg *dcb;
struct nlmsghdr *nlh;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return NULL;
nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
BUG_ON(!nlh);
dcb = nlmsg_data(nlh);
dcb->dcb_family = AF_UNSPEC;
dcb->cmd = cmd;
dcb->dcb_pad = 0;
if (nlhp)
*nlhp = nlh;
return skb;
}
static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
if (!netdev->dcbnl_ops->getstate)
return -EOPNOTSUPP;
return nla_put_u8(skb, DCB_ATTR_STATE,
netdev->dcbnl_ops->getstate(netdev));
}
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
u8 value;
int ret;
int i;
int getall = 0;
if (!tb[DCB_ATTR_PFC_CFG])
return -EINVAL;
if (!netdev->dcbnl_ops->getpfccfg)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest, NULL);
if (ret)
return ret;
nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
if (!nest)
return -EMSGSIZE;
if (data[DCB_PFC_UP_ATTR_ALL])
getall = 1;
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
if (!getall && !data[i])
continue;
netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
&value);
ret = nla_put_u8(skb, i, value);
if (ret) {
nla_nest_cancel(skb, nest);
return ret;
}
}
nla_nest_end(skb, nest);
return 0;
}
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
u8 perm_addr[MAX_ADDR_LEN];
if (!netdev->dcbnl_ops->getpermhwaddr)
return -EOPNOTSUPP;
memset(perm_addr, 0, sizeof(perm_addr));
netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
u8 value;
int ret;
int i;
int getall = 0;
if (!tb[DCB_ATTR_CAP])
return -EINVAL;
if (!netdev->dcbnl_ops->getcap)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
tb[DCB_ATTR_CAP], dcbnl_cap_nest,
NULL);
if (ret)
return ret;
nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
if (!nest)
return -EMSGSIZE;
if (data[DCB_CAP_ATTR_ALL])
getall = 1;
for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
if (!getall && !data[i])
continue;
if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
ret = nla_put_u8(skb, i, value);
if (ret) {
nla_nest_cancel(skb, nest);
return ret;
}
}
}
nla_nest_end(skb, nest);
return 0;
}
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
u8 value;
int ret;
int i;
int getall = 0;
if (!tb[DCB_ATTR_NUMTCS])
return -EINVAL;
if (!netdev->dcbnl_ops->getnumtcs)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest, NULL);
if (ret)
return ret;
nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
if (!nest)
return -EMSGSIZE;
if (data[DCB_NUMTCS_ATTR_ALL])
getall = 1;
for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
if (!getall && !data[i])
continue;
ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
if (!ret) {
ret = nla_put_u8(skb, i, value);
if (ret) {
nla_nest_cancel(skb, nest);
return ret;
}
} else
return -EINVAL;
}
nla_nest_end(skb, nest);
return 0;
}
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
int ret;
u8 value;
int i;
if (!tb[DCB_ATTR_NUMTCS])
return -EINVAL;
if (!netdev->dcbnl_ops->setnumtcs)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest, NULL);
if (ret)
return ret;
for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
if (data[i] == NULL)
continue;
value = nla_get_u8(data[i]);
ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
if (ret)
break;
}
return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
if (!netdev->dcbnl_ops->getpfcstate)
return -EOPNOTSUPP;
return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
netdev->dcbnl_ops->getpfcstate(netdev));
}
static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
u8 value;
if (!tb[DCB_ATTR_PFC_STATE])
return -EINVAL;
if (!netdev->dcbnl_ops->setpfcstate)
return -EOPNOTSUPP;
value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
netdev->dcbnl_ops->setpfcstate(netdev, value);
return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *app_nest;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
u16 id;
u8 up, idtype;
int ret;
if (!tb[DCB_ATTR_APP])
return -EINVAL;
ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
tb[DCB_ATTR_APP], dcbnl_app_nest,
NULL);
if (ret)
return ret;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]))
return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
if (netdev->dcbnl_ops->getapp) {
ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
if (ret < 0)
return ret;
else
up = ret;
} else {
struct dcb_app app = {
.selector = idtype,
.protocol = id,
};
up = dcb_getapp(netdev, &app);
}
app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
if (!app_nest)
return -EMSGSIZE;
ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
if (ret)
goto out_cancel;
ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
if (ret)
goto out_cancel;
ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
if (ret)
goto out_cancel;
nla_nest_end(skb, app_nest);
return 0;
out_cancel:
nla_nest_cancel(skb, app_nest);
return ret;
}
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
int ret;
u16 id;
u8 up, idtype;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
if (!tb[DCB_ATTR_APP])
return -EINVAL;
ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
tb[DCB_ATTR_APP], dcbnl_app_nest,
NULL);
if (ret)
return ret;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]) ||
(!app_tb[DCB_APP_ATTR_PRIORITY]))
return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
if (netdev->dcbnl_ops->setapp) {
ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
if (ret < 0)
return ret;
} else {
struct dcb_app app;
app.selector = idtype;
app.protocol = id;
app.priority = up;
ret = dcb_setapp(netdev, &app);
}
ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
return ret;
}
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
struct nlattr **tb, struct sk_buff *skb, int dir)
{
struct nlattr *pg_nest, *param_nest, *data;
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
u8 prio, pgid, tc_pct, up_map;
int ret;
int getall = 0;
int i;
if (!tb[DCB_ATTR_PG_CFG])
return -EINVAL;
if (!netdev->dcbnl_ops->getpgtccfgtx ||
!netdev->dcbnl_ops->getpgtccfgrx ||
!netdev->dcbnl_ops->getpgbwgcfgtx ||
!netdev->dcbnl_ops->getpgbwgcfgrx)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
NULL);
if (ret)
return ret;
pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
if (!pg_nest)
return -EMSGSIZE;
if (pg_tb[DCB_PG_ATTR_TC_ALL])
getall = 1;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
if (!getall && !pg_tb[i])
continue;
if (pg_tb[DCB_PG_ATTR_TC_ALL])
data = pg_tb[DCB_PG_ATTR_TC_ALL];
else
data = pg_tb[i];
ret = nla_parse_nested_deprecated(param_tb,
DCB_TC_ATTR_PARAM_MAX, data,
dcbnl_tc_param_nest, NULL);
if (ret)
goto err_pg;
param_nest = nla_nest_start_noflag(skb, i);
if (!param_nest)
goto err_pg;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
up_map = DCB_ATTR_VALUE_UNDEFINED;
if (dir) {
/* Rx */
netdev->dcbnl_ops->getpgtccfgrx(netdev,
i - DCB_PG_ATTR_TC_0, &prio,
&pgid, &tc_pct, &up_map);
} else {
/* Tx */
netdev->dcbnl_ops->getpgtccfgtx(netdev,
i - DCB_PG_ATTR_TC_0, &prio,
&pgid, &tc_pct, &up_map);
}
if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_PGID, pgid);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
tc_pct);
if (ret)
goto err_param;
}
nla_nest_end(skb, param_nest);
}
if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
getall = 1;
else
getall = 0;
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
if (!getall && !pg_tb[i])
continue;
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
if (dir) {
/* Rx */
netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
} else {
/* Tx */
netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
}
ret = nla_put_u8(skb, i, tc_pct);
if (ret)
goto err_pg;
}
nla_nest_end(skb, pg_nest);
return 0;
err_param:
nla_nest_cancel(skb, param_nest);
err_pg:
nla_nest_cancel(skb, pg_nest);
return -EMSGSIZE;
}
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
u8 value;
if (!tb[DCB_ATTR_STATE])
return -EINVAL;
if (!netdev->dcbnl_ops->setstate)
return -EOPNOTSUPP;
value = nla_get_u8(tb[DCB_ATTR_STATE]);
return nla_put_u8(skb, DCB_ATTR_STATE,
netdev->dcbnl_ops->setstate(netdev, value));
}
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
int i;
int ret;
u8 value;
if (!tb[DCB_ATTR_PFC_CFG])
return -EINVAL;
if (!netdev->dcbnl_ops->setpfccfg)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest, NULL);
if (ret)
return ret;
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
if (data[i] == NULL)
continue;
value = nla_get_u8(data[i]);
netdev->dcbnl_ops->setpfccfg(netdev,
data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
}
return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
int ret;
if (!tb[DCB_ATTR_SET_ALL])
return -EINVAL;
if (!netdev->dcbnl_ops->setall)
return -EOPNOTSUPP;
ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
netdev->dcbnl_ops->setall(netdev));
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
return ret;
}
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb,
int dir)
{
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
int ret;
int i;
u8 pgid;
u8 up_map;
u8 prio;
u8 tc_pct;
if (!tb[DCB_ATTR_PG_CFG])
return -EINVAL;
if (!netdev->dcbnl_ops->setpgtccfgtx ||
!netdev->dcbnl_ops->setpgtccfgrx ||
!netdev->dcbnl_ops->setpgbwgcfgtx ||
!netdev->dcbnl_ops->setpgbwgcfgrx)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
NULL);
if (ret)
return ret;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
if (!pg_tb[i])
continue;
ret = nla_parse_nested_deprecated(param_tb,
DCB_TC_ATTR_PARAM_MAX,
pg_tb[i],
dcbnl_tc_param_nest, NULL);
if (ret)
return ret;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
up_map = DCB_ATTR_VALUE_UNDEFINED;
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
prio =
nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
if (param_tb[DCB_TC_ATTR_PARAM_PGID])
pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
up_map =
nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
/* dir: Tx = 0, Rx = 1 */
if (dir) {
/* Rx */
netdev->dcbnl_ops->setpgtccfgrx(netdev,
i - DCB_PG_ATTR_TC_0,
prio, pgid, tc_pct, up_map);
} else {
/* Tx */
netdev->dcbnl_ops->setpgtccfgtx(netdev,
i - DCB_PG_ATTR_TC_0,
prio, pgid, tc_pct, up_map);
}
}
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
if (!pg_tb[i])
continue;
tc_pct = nla_get_u8(pg_tb[i]);
/* dir: Tx = 0, Rx = 1 */
if (dir) {
/* Rx */
netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
i - DCB_PG_ATTR_BW_ID_0, tc_pct);
} else {
/* Tx */
netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
i - DCB_PG_ATTR_BW_ID_0, tc_pct);
}
}
return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *bcn_nest;
struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
u8 value_byte;
u32 value_integer;
int ret;
bool getall = false;
int i;
if (!tb[DCB_ATTR_BCN])
return -EINVAL;
if (!netdev->dcbnl_ops->getbcnrp ||
!netdev->dcbnl_ops->getbcncfg)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
NULL);
if (ret)
return ret;
bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
if (!bcn_nest)
return -EMSGSIZE;
if (bcn_tb[DCB_BCN_ATTR_ALL])
getall = true;
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
if (!getall && !bcn_tb[i])
continue;
netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
&value_byte);
ret = nla_put_u8(skb, i, value_byte);
if (ret)
goto err_bcn;
}
for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
if (!getall && !bcn_tb[i])
continue;
netdev->dcbnl_ops->getbcncfg(netdev, i,
&value_integer);
ret = nla_put_u32(skb, i, value_integer);
if (ret)
goto err_bcn;
}
nla_nest_end(skb, bcn_nest);
return 0;
err_bcn:
nla_nest_cancel(skb, bcn_nest);
return ret;
}
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
int i;
int ret;
u8 value_byte;
u32 value_int;
if (!tb[DCB_ATTR_BCN])
return -EINVAL;
if (!netdev->dcbnl_ops->setbcncfg ||
!netdev->dcbnl_ops->setbcnrp)
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
NULL);
if (ret)
return ret;
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
if (data[i] == NULL)
continue;
value_byte = nla_get_u8(data[i]);
netdev->dcbnl_ops->setbcnrp(netdev,
data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
}
for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
if (data[i] == NULL)
continue;
value_int = nla_get_u32(data[i]);
netdev->dcbnl_ops->setbcncfg(netdev,
i, value_int);
}
return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
int app_nested_type, int app_info_type,
int app_entry_type)
{
struct dcb_peer_app_info info;
struct dcb_app *table = NULL;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
u16 app_count;
int err;
/* Retrieve the peer app configuration from the driver. If the driver
 * handlers fail, exit without doing anything.
 */
err = ops->peer_getappinfo(netdev, &info, &app_count);
if (!err && app_count) {
table = kmalloc_array(app_count, sizeof(struct dcb_app),
GFP_KERNEL);
if (!table)
return -ENOMEM;
err = ops->peer_getapptable(netdev, table);
}
if (!err) {
u16 i;
struct nlattr *app;
/* Build the message; from here on the only possible failure
 * is due to the skb size.
 */
err = -EMSGSIZE;
app = nla_nest_start_noflag(skb, app_nested_type);
if (!app)
goto nla_put_failure;
if (app_info_type &&
nla_put(skb, app_info_type, sizeof(info), &info))
goto nla_put_failure;
for (i = 0; i < app_count; i++) {
if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
&table[i]))
goto nla_put_failure;
}
nla_nest_end(skb, app);
}
err = 0;
nla_put_failure:
kfree(table);
return err;
}
static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
enum ieee_attrs_app type;
struct nlattr *apptrust;
int nselectors, err, i;
u8 *selectors;
selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
if (!selectors)
return -ENOMEM;
err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
if (err) {
err = 0;
goto out;
}
apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
if (!apptrust) {
err = -EMSGSIZE;
goto out;
}
for (i = 0; i < nselectors; i++) {
type = dcbnl_app_attr_type_get(selectors[i]);
err = nla_put_u8(skb, type, selectors[i]);
if (err) {
nla_nest_cancel(skb, apptrust);
goto out;
}
}
nla_nest_end(skb, apptrust);
out:
kfree(selectors);
return err;
}
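/* Illustrative sketch (not part of dcbnl.c): one way a driver could back the
 * dcbnl_setapptrust/dcbnl_getapptrust pair used above, by caching the
 * selector order in its private data. "example_priv" and the example_*
 * functions are hypothetical; only the callback signatures are taken from
 * the call sites in this file.
 */
struct example_priv {
	u8 trust[IEEE_8021QAZ_APP_SEL_MAX + 1];
	int ntrust;
};

static int example_setapptrust(struct net_device *dev, u8 *selectors,
			       int nselectors)
{
	struct example_priv *ep = netdev_priv(dev);

	if (nselectors > IEEE_8021QAZ_APP_SEL_MAX + 1)
		return -EINVAL;

	memcpy(ep->trust, selectors, nselectors);
	ep->ntrust = nselectors;
	return 0;
}

static int example_getapptrust(struct net_device *dev, u8 *selectors,
			       int *nselectors)
{
	struct example_priv *ep = netdev_priv(dev);

	/* dcbnl_getapptrust() above passes a buffer sized for
	 * IEEE_8021QAZ_APP_SEL_MAX + 1 selectors.
	 */
	memcpy(selectors, ep->trust, ep->ntrust);
	*nselectors = ep->ntrust;
	return 0;
}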
/* Set or delete APP table or rewrite table entries. The APP struct is validated
* and the appropriate callback function is called.
*/
static int dcbnl_app_table_setdel(struct nlattr *attr,
struct net_device *netdev,
int (*setdel)(struct net_device *dev,
struct dcb_app *app))
{
struct dcb_app *app_data;
enum ieee_attrs_app type;
struct nlattr *attr_itr;
int rem, err;
nla_for_each_nested(attr_itr, attr, rem) {
type = nla_type(attr_itr);
if (!dcbnl_app_attr_type_validate(type))
continue;
if (nla_len(attr_itr) < sizeof(struct dcb_app))
return -ERANGE;
app_data = nla_data(attr_itr);
if (!dcbnl_app_selector_validate(type, app_data->selector))
return -EINVAL;
err = setdel(netdev, app_data);
if (err)
return err;
}
return 0;
}
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee, *app, *rewr;
struct dcb_app_type *itr;
int dcbx;
int err;
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
return -EMSGSIZE;
ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
if (!ieee)
return -EMSGSIZE;
if (ops->ieee_getets) {
struct ieee_ets ets;
memset(&ets, 0, sizeof(ets));
err = ops->ieee_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
return -EMSGSIZE;
}
if (ops->ieee_getmaxrate) {
struct ieee_maxrate maxrate;
memset(&maxrate, 0, sizeof(maxrate));
err = ops->ieee_getmaxrate(netdev, &maxrate);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
sizeof(maxrate), &maxrate);
if (err)
return -EMSGSIZE;
}
}
if (ops->ieee_getqcn) {
struct ieee_qcn qcn;
memset(&qcn, 0, sizeof(qcn));
err = ops->ieee_getqcn(netdev, &qcn);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_QCN,
sizeof(qcn), &qcn);
if (err)
return -EMSGSIZE;
}
}
if (ops->ieee_getqcnstats) {
struct ieee_qcn_stats qcn_stats;
memset(&qcn_stats, 0, sizeof(qcn_stats));
err = ops->ieee_getqcnstats(netdev, &qcn_stats);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
sizeof(qcn_stats), &qcn_stats);
if (err)
return -EMSGSIZE;
}
}
if (ops->ieee_getpfc) {
struct ieee_pfc pfc;
memset(&pfc, 0, sizeof(pfc));
err = ops->ieee_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
return -EMSGSIZE;
}
if (ops->dcbnl_getbuffer) {
struct dcbnl_buffer buffer;
memset(&buffer, 0, sizeof(buffer));
err = ops->dcbnl_getbuffer(netdev, &buffer);
if (!err &&
nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
return -EMSGSIZE;
}
app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
if (!app)
return -EMSGSIZE;
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == netdev->ifindex) {
enum ieee_attrs_app type =
dcbnl_app_attr_type_get(itr->app.selector);
err = nla_put(skb, type, sizeof(itr->app), &itr->app);
if (err) {
spin_unlock_bh(&dcb_lock);
return -EMSGSIZE;
}
}
}
if (netdev->dcbnl_ops->getdcbx)
dcbx = netdev->dcbnl_ops->getdcbx(netdev);
else
dcbx = -EOPNOTSUPP;
spin_unlock_bh(&dcb_lock);
nla_nest_end(skb, app);
rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE);
if (!rewr)
return -EMSGSIZE;
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_rewr_list, list) {
if (itr->ifindex == netdev->ifindex) {
enum ieee_attrs_app type =
dcbnl_app_attr_type_get(itr->app.selector);
err = nla_put(skb, type, sizeof(itr->app), &itr->app);
if (err) {
spin_unlock_bh(&dcb_lock);
nla_nest_cancel(skb, rewr);
return -EMSGSIZE;
}
}
}
spin_unlock_bh(&dcb_lock);
nla_nest_end(skb, rewr);
if (ops->dcbnl_getapptrust) {
err = dcbnl_getapptrust(netdev, skb);
if (err)
return err;
}
/* get peer info if available */
if (ops->ieee_peer_getets) {
struct ieee_ets ets;
memset(&ets, 0, sizeof(ets));
err = ops->ieee_peer_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
return -EMSGSIZE;
}
if (ops->ieee_peer_getpfc) {
struct ieee_pfc pfc;
memset(&pfc, 0, sizeof(pfc));
err = ops->ieee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
return -EMSGSIZE;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
err = dcbnl_build_peer_app(netdev, skb,
DCB_ATTR_IEEE_PEER_APP,
DCB_ATTR_IEEE_APP_UNSPEC,
DCB_ATTR_IEEE_APP);
if (err)
return -EMSGSIZE;
}
nla_nest_end(skb, ieee);
if (dcbx >= 0) {
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
if (err)
return -EMSGSIZE;
}
return 0;
}
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
int dir)
{
u8 pgid, up_map, prio, tc_pct;
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
struct nlattr *pg = nla_nest_start_noflag(skb, i);
if (!pg)
return -EMSGSIZE;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
if (!tc_nest)
return -EMSGSIZE;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
up_map = DCB_ATTR_VALUE_UNDEFINED;
if (!dir)
ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
&prio, &pgid, &tc_pct, &up_map);
else
ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
&prio, &pgid, &tc_pct, &up_map);
if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
return -EMSGSIZE;
nla_nest_end(skb, tc_nest);
}
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
if (!dir)
ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
&tc_pct);
else
ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
&tc_pct);
if (nla_put_u8(skb, i, tc_pct))
return -EMSGSIZE;
}
nla_nest_end(skb, pg);
return 0;
}
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
struct nlattr *cee, *app;
struct dcb_app_type *itr;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
int dcbx, i, err = -EMSGSIZE;
u8 value;
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
goto nla_put_failure;
cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
if (!cee)
goto nla_put_failure;
/* local pg */
if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
err = dcbnl_cee_pg_fill(skb, netdev, 1);
if (err)
goto nla_put_failure;
}
if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
err = dcbnl_cee_pg_fill(skb, netdev, 0);
if (err)
goto nla_put_failure;
}
/* local pfc */
if (ops->getpfccfg) {
struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
DCB_ATTR_CEE_PFC);
if (!pfc_nest)
goto nla_put_failure;
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
if (nla_put_u8(skb, i, value))
goto nla_put_failure;
}
nla_nest_end(skb, pfc_nest);
}
/* local app */
spin_lock_bh(&dcb_lock);
app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
if (!app)
goto dcb_unlock;
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == netdev->ifindex) {
struct nlattr *app_nest = nla_nest_start_noflag(skb,
DCB_ATTR_APP);
if (!app_nest)
goto dcb_unlock;
err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
itr->app.selector);
if (err)
goto dcb_unlock;
err = nla_put_u16(skb, DCB_APP_ATTR_ID,
itr->app.protocol);
if (err)
goto dcb_unlock;
err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
itr->app.priority);
if (err)
goto dcb_unlock;
nla_nest_end(skb, app_nest);
}
}
nla_nest_end(skb, app);
if (netdev->dcbnl_ops->getdcbx)
dcbx = netdev->dcbnl_ops->getdcbx(netdev);
else
dcbx = -EOPNOTSUPP;
spin_unlock_bh(&dcb_lock);
/* features flags */
if (ops->getfeatcfg) {
struct nlattr *feat = nla_nest_start_noflag(skb,
DCB_ATTR_CEE_FEAT);
if (!feat)
goto nla_put_failure;
for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
i++)
if (!ops->getfeatcfg(netdev, i, &value) &&
nla_put_u8(skb, i, value))
goto nla_put_failure;
nla_nest_end(skb, feat);
}
/* peer info if available */
if (ops->cee_peer_getpg) {
struct cee_pg pg;
memset(&pg, 0, sizeof(pg));
err = ops->cee_peer_getpg(netdev, &pg);
if (!err &&
nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
goto nla_put_failure;
}
if (ops->cee_peer_getpfc) {
struct cee_pfc pfc;
memset(&pfc, 0, sizeof(pfc));
err = ops->cee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
goto nla_put_failure;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
err = dcbnl_build_peer_app(netdev, skb,
DCB_ATTR_CEE_PEER_APP_TABLE,
DCB_ATTR_CEE_PEER_APP_INFO,
DCB_ATTR_CEE_PEER_APP);
if (err)
goto nla_put_failure;
}
nla_nest_end(skb, cee);
/* DCBX state */
if (dcbx >= 0) {
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
if (err)
goto nla_put_failure;
}
return 0;
dcb_unlock:
spin_unlock_bh(&dcb_lock);
nla_put_failure:
err = -EMSGSIZE;
return err;
}
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 portid, int dcbx_ver)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
struct nlmsghdr *nlh;
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
int err;
if (!ops)
return -EOPNOTSUPP;
skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
if (!skb)
return -ENOMEM;
if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
err = dcbnl_ieee_fill(skb, dev);
else
err = dcbnl_cee_fill(skb, dev);
if (err < 0) {
/* Report error to broadcast listeners */
nlmsg_free(skb);
rtnl_set_sk_err(net, RTNLGRP_DCB, err);
} else {
/* End nlmsg and notify broadcast listeners */
nlmsg_end(skb, nlh);
rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
}
return err;
}
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 portid)
{
return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 portid)
{
return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed, the entire message is
 * aborted and an error value is returned. No attempt is made to reconcile
 * the case where only part of the command could be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
int prio;
int err;
if (!ops)
return -EOPNOTSUPP;
if (!tb[DCB_ATTR_IEEE])
return -EINVAL;
err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
tb[DCB_ATTR_IEEE],
dcbnl_ieee_policy, NULL);
if (err)
return err;
if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
err = ops->ieee_setets(netdev, ets);
if (err)
goto err;
}
if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
struct ieee_maxrate *maxrate =
nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
err = ops->ieee_setmaxrate(netdev, maxrate);
if (err)
goto err;
}
if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
struct ieee_qcn *qcn =
nla_data(ieee[DCB_ATTR_IEEE_QCN]);
err = ops->ieee_setqcn(netdev, qcn);
if (err)
goto err;
}
if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
err = ops->ieee_setpfc(netdev, pfc);
if (err)
goto err;
}
if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
struct dcbnl_buffer *buffer =
nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
err = -EINVAL;
goto err;
}
}
err = ops->dcbnl_setbuffer(netdev, buffer);
if (err)
goto err;
}
if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
netdev,
ops->dcbnl_setrewr ?: dcb_setrewr);
if (err)
goto err;
}
if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
netdev, ops->ieee_setapp ?:
dcb_ieee_setapp);
if (err)
goto err;
}
if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
struct nlattr *attr;
int nselectors = 0;
int rem;
if (!ops->dcbnl_setapptrust) {
err = -EOPNOTSUPP;
goto err;
}
nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE],
rem) {
enum ieee_attrs_app type = nla_type(attr);
u8 selector;
int i;
if (!dcbnl_app_attr_type_validate(type) ||
nla_len(attr) != 1 ||
nselectors >= sizeof(selectors)) {
err = -EINVAL;
goto err;
}
selector = nla_get_u8(attr);
if (!dcbnl_app_selector_validate(type, selector)) {
err = -EINVAL;
goto err;
}
/* Duplicate selector? */
for (i = 0; i < nselectors; i++) {
if (selectors[i] == selector) {
err = -EINVAL;
goto err;
}
}
selectors[nselectors++] = selector;
}
err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
if (err)
goto err;
}
err:
err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
return err;
}
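/* Illustrative sketch (not part of dcbnl.c): a minimal driver .ieee_setapp
 * callback. When a driver provides this op, dcbnl_app_table_setdel() above
 * calls it instead of dcb_ieee_setapp(), so a typical implementation
 * validates what the hardware can express and then updates the shared APP
 * list. "example_ieee_setapp" is a hypothetical name.
 */
static int example_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	/* Reject DSCP code points outside the 6-bit range. */
	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP && app->protocol > 63)
		return -EINVAL;

	/* Keep the core bookkeeping in sync. */
	return dcb_ieee_setapp(dev, app);
}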
static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
if (!ops)
return -EOPNOTSUPP;
return dcbnl_ieee_fill(skb, netdev);
}
static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
int err;
if (!ops)
return -EOPNOTSUPP;
if (!tb[DCB_ATTR_IEEE])
return -EINVAL;
err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
tb[DCB_ATTR_IEEE],
dcbnl_ieee_policy, NULL);
if (err)
return err;
if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
netdev, ops->ieee_delapp ?:
dcb_ieee_delapp);
if (err)
goto err;
}
if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
netdev,
ops->dcbnl_delrewr ?: dcb_delrewr);
if (err)
goto err;
}
err:
err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
return err;
}
/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
if (!netdev->dcbnl_ops->getdcbx)
return -EOPNOTSUPP;
return nla_put_u8(skb, DCB_ATTR_DCBX,
netdev->dcbnl_ops->getdcbx(netdev));
}
static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
u8 value;
if (!netdev->dcbnl_ops->setdcbx)
return -EOPNOTSUPP;
if (!tb[DCB_ATTR_DCBX])
return -EINVAL;
value = nla_get_u8(tb[DCB_ATTR_DCBX]);
return nla_put_u8(skb, DCB_ATTR_DCBX,
netdev->dcbnl_ops->setdcbx(netdev, value));
}
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
u8 value;
int ret, i;
int getall = 0;
if (!netdev->dcbnl_ops->getfeatcfg)
return -EOPNOTSUPP;
if (!tb[DCB_ATTR_FEATCFG])
return -EINVAL;
ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
tb[DCB_ATTR_FEATCFG],
dcbnl_featcfg_nest, NULL);
if (ret)
return ret;
nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
if (!nest)
return -EMSGSIZE;
if (data[DCB_FEATCFG_ATTR_ALL])
getall = 1;
for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
if (!getall && !data[i])
continue;
ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
if (!ret)
ret = nla_put_u8(skb, i, value);
if (ret) {
nla_nest_cancel(skb, nest);
goto nla_put_failure;
}
}
nla_nest_end(skb, nest);
nla_put_failure:
return ret;
}
static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
int ret, i;
u8 value;
if (!netdev->dcbnl_ops->setfeatcfg)
return -ENOTSUPP;
if (!tb[DCB_ATTR_FEATCFG])
return -EINVAL;
ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
tb[DCB_ATTR_FEATCFG],
dcbnl_featcfg_nest, NULL);
if (ret)
goto err;
for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
if (data[i] == NULL)
continue;
value = nla_get_u8(data[i]);
ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
if (ret)
goto err;
}
err:
ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
return ret;
}
/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
if (!ops)
return -EOPNOTSUPP;
return dcbnl_cee_fill(skb, netdev);
}
struct reply_func {
/* reply netlink message type */
int type;
/* function to fill message contents */
int (*cb)(struct net_device *, struct nlmsghdr *, u32,
struct nlattr **, struct sk_buff *);
};
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
[DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
[DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
[DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
[DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
[DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
[DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
[DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
[DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
[DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
[DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
[DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
[DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
[DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
[DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
[DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
[DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
[DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
[DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
[DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
[DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
[DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
[DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
[DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
[DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
[DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
[DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
[DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
};
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct net_device *netdev;
struct dcbmsg *dcb = nlmsg_data(nlh);
struct nlattr *tb[DCB_ATTR_MAX + 1];
u32 portid = NETLINK_CB(skb).portid;
int ret = -EINVAL;
struct sk_buff *reply_skb;
struct nlmsghdr *reply_nlh = NULL;
const struct reply_func *fn;
if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
dcbnl_rtnl_policy, extack);
if (ret < 0)
return ret;
if (dcb->cmd > DCB_CMD_MAX)
return -EINVAL;
/* check if a reply function has been defined for the command */
fn = &reply_funcs[dcb->cmd];
if (!fn->cb)
return -EOPNOTSUPP;
if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
if (!netdev)
return -ENODEV;
if (!netdev->dcbnl_ops)
return -EOPNOTSUPP;
reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
nlh->nlmsg_flags, &reply_nlh);
if (!reply_skb)
return -ENOMEM;
ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
if (ret < 0) {
nlmsg_free(reply_skb);
goto out;
}
nlmsg_end(reply_skb, reply_nlh);
ret = rtnl_unicast(reply_skb, net, portid);
out:
return ret;
}
static struct dcb_app_type *dcb_rewr_lookup(const struct dcb_app *app,
int ifindex, int proto)
{
struct dcb_app_type *itr;
list_for_each_entry(itr, &dcb_rewr_list, list) {
if (itr->app.selector == app->selector &&
itr->app.priority == app->priority &&
itr->ifindex == ifindex &&
((proto == -1) || itr->app.protocol == proto))
return itr;
}
return NULL;
}
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
int ifindex, int prio)
{
struct dcb_app_type *itr;
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
((prio == -1) || itr->app.priority == prio))
return itr;
}
return NULL;
}
static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
int ifindex)
{
struct dcb_app_type *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
memcpy(&entry->app, app, sizeof(*app));
entry->ifindex = ifindex;
list_add(&entry->list, list);
return 0;
}
/**
* dcb_getapp - retrieve the DCBX application user priority
* @dev: network interface
* @app: application to get user priority of
*
 * On success returns a non-zero 802.1p user priority bitmap;
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
*/
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app_type *itr;
u8 prio = 0;
spin_lock_bh(&dcb_lock);
itr = dcb_app_lookup(app, dev->ifindex, -1);
if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);
return prio;
}
EXPORT_SYMBOL(dcb_getapp);
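/* Illustrative sketch (not part of dcbnl.c): a CEE-mode driver querying the
 * user-priority bitmap configured for FCoE frames. The function name is
 * hypothetical; dcb_getapp() returns 0 when no matching entry exists.
 */
static u8 example_get_fcoe_up_map(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
	};

	return dcb_getapp(dev, &app);	/* e.g. 0x08 => user priority 3 */
}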
/**
* dcb_setapp - add CEE dcb application data to app list
* @dev: network interface
* @new: application data to add
*
 * Priority 0 is an invalid priority in the CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
*/
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type *itr;
struct dcb_app_type event;
int err = 0;
event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
if (dev->dcbnl_ops->getdcbx)
event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
itr = dcb_app_lookup(new, dev->ifindex, -1);
if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
list_del(&itr->list);
kfree(itr);
}
goto out;
}
/* App type does not exist, add new application type */
if (new->priority)
err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
spin_unlock_bh(&dcb_lock);
if (!err)
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
return err;
}
EXPORT_SYMBOL(dcb_setapp);
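/* Illustrative sketch (not part of dcbnl.c): adding a CEE APP entry for FCoE
 * with user-priority bitmap 0x08 (bit 3 set), then clearing it again by
 * writing priority 0, which per the comment above removes the entry. The
 * function name is hypothetical.
 */
static int example_cee_fcoe_app(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
		.priority = 0x08,	/* 802.1p bitmap, user priority 3 */
	};
	int err = dcb_setapp(dev, &app);

	if (err)
		return err;

	app.priority = 0;		/* zero bitmap deletes the entry */
	return dcb_setapp(dev, &app);
}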
/**
* dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
* @dev: network interface
 * @app: where to store the retrieved application data
*
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap; otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
*/
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app_type *itr;
u8 prio = 0;
spin_lock_bh(&dcb_lock);
itr = dcb_app_lookup(app, dev->ifindex, -1);
if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);
return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
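/* Illustrative sketch (not part of dcbnl.c): looking up the IEEE priority
 * mask configured for iSCSI (stream selector, TCP port 3260). The helper
 * returns a bitmap, so more than one priority may be reported at once.
 */
static u8 example_iscsi_prio_mask(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_STREAM,
		.protocol = 3260,
	};

	return dcb_ieee_getapp_mask(dev, &app);	/* e.g. 0x10 => priority 4 */
}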
/* Get protocol value from rewrite entry. */
u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app_type *itr;
u16 proto = 0;
spin_lock_bh(&dcb_lock);
itr = dcb_rewr_lookup(app, dev->ifindex, -1);
if (itr)
proto = itr->app.protocol;
spin_unlock_bh(&dcb_lock);
return proto;
}
EXPORT_SYMBOL(dcb_getrewr);
/* Add rewrite entry to the rewrite list. */
int dcb_setrewr(struct net_device *dev, struct dcb_app *new)
{
int err;
spin_lock_bh(&dcb_lock);
/* Search for existing match and abort if found. */
if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) {
err = -EEXIST;
goto out;
}
err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex);
out:
spin_unlock_bh(&dcb_lock);
return err;
}
EXPORT_SYMBOL(dcb_setrewr);
/* Delete rewrite entry from the rewrite list. */
int dcb_delrewr(struct net_device *dev, struct dcb_app *del)
{
struct dcb_app_type *itr;
int err = -ENOENT;
spin_lock_bh(&dcb_lock);
/* Search for existing match and remove it. */
itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol);
if (itr) {
list_del(&itr->list);
kfree(itr);
err = 0;
}
spin_unlock_bh(&dcb_lock);
return err;
}
EXPORT_SYMBOL(dcb_delrewr);
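/* Illustrative sketch (not part of dcbnl.c): managing a DSCP rewrite entry.
 * For the DSCP selector the protocol field carries the DSCP code point
 * (0..63) keyed by priority, matching dcb_getrewr_prio_dscp_mask_map()
 * below. The function name is hypothetical.
 */
static int example_dscp_rewrite(struct net_device *dev, bool enable)
{
	struct dcb_app rewr = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.priority = 3,	/* egress traffic at priority 3 ... */
		.protocol = 26,	/* ... is remarked with DSCP 26 (AF31) */
	};

	/* dcb_setrewr() returns -EEXIST for duplicates and
	 * dcb_delrewr() returns -ENOENT when nothing matches.
	 */
	return enable ? dcb_setrewr(dev, &rewr) : dcb_delrewr(dev, &rewr);
}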
/**
* dcb_ieee_setapp - add IEEE dcb application data to app list
* @dev: network interface
* @new: application data to add
*
 * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different. Priority is expected to be a
 * 3-bit unsigned integer.
*/
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type event;
int err = 0;
event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
if (dev->dcbnl_ops->getdcbx)
event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock_bh(&dcb_lock);
/* Search for existing match and abort if found */
if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
err = -EEXIST;
goto out;
}
err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
spin_unlock_bh(&dcb_lock);
if (!err)
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);
/**
* dcb_ieee_delapp - delete IEEE dcb application data from list
* @dev: network interface
* @del: application data to delete
*
 * This removes matching APP data from the APP list.
*/
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
struct dcb_app_type *itr;
struct dcb_app_type event;
int err = -ENOENT;
event.ifindex = dev->ifindex;
memcpy(&event.app, del, sizeof(event.app));
if (dev->dcbnl_ops->getdcbx)
event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock_bh(&dcb_lock);
/* Search for existing match and remove it. */
if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
list_del(&itr->list);
kfree(itr);
err = 0;
}
spin_unlock_bh(&dcb_lock);
if (!err)
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
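/* Illustrative sketch (not part of dcbnl.c): pairing dcb_ieee_setapp() and
 * dcb_ieee_delapp() for an Ethertype-based APP entry. Adding the exact same
 * {selector, protocol, priority} tuple twice fails with -EEXIST, while
 * entries differing only in priority may coexist. The function name is
 * hypothetical.
 */
static int example_ieee_app_cycle(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ETH_P_FCOE,
		.priority = 3,
	};
	int err = dcb_ieee_setapp(dev, &app);

	if (err)
		return err;

	return dcb_ieee_delapp(dev, &app);
}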
/* dcb_getrewr_prio_pcp_mask_map - For a given device, find mapping from
* priorities to the PCP and DEI values assigned to that priority.
*/
void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
struct dcb_rewr_prio_pcp_map *p_map)
{
int ifindex = dev->ifindex;
struct dcb_app_type *itr;
u8 prio;
memset(p_map->map, 0, sizeof(p_map->map));
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_rewr_list, list) {
if (itr->ifindex == ifindex &&
itr->app.selector == DCB_APP_SEL_PCP &&
itr->app.protocol < 16 &&
itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
prio = itr->app.priority;
p_map->map[prio] |= 1 << itr->app.protocol;
}
}
spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map);
/* dcb_getrewr_prio_dscp_mask_map - For a given device, find mapping from
* priorities to the DSCP values assigned to that priority.
*/
void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map)
{
int ifindex = dev->ifindex;
struct dcb_app_type *itr;
u8 prio;
memset(p_map->map, 0, sizeof(p_map->map));
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_rewr_list, list) {
if (itr->ifindex == ifindex &&
itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
itr->app.protocol < 64 &&
itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
prio = itr->app.priority;
p_map->map[prio] |= 1ULL << itr->app.protocol;
}
}
spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map);
/*
* dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
* priorities to the DSCP values assigned to that priority. Initialize p_map
* such that each map element holds a bit mask of DSCP values configured for
* that priority by APP entries.
*/
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map)
{
int ifindex = dev->ifindex;
struct dcb_app_type *itr;
u8 prio;
memset(p_map->map, 0, sizeof(p_map->map));
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == ifindex &&
itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
itr->app.protocol < 64 &&
itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
prio = itr->app.priority;
p_map->map[prio] |= 1ULL << itr->app.protocol;
}
}
spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
/*
* dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
* DSCP values to the priorities assigned to that DSCP value. Initialize p_map
* such that each map element holds a bit mask of priorities configured for a
* given DSCP value by APP entries.
*/
void
dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
struct dcb_ieee_app_dscp_map *p_map)
{
int ifindex = dev->ifindex;
struct dcb_app_type *itr;
memset(p_map->map, 0, sizeof(p_map->map));
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == ifindex &&
itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
itr->app.protocol < 64 &&
itr->app.priority < IEEE_8021QAZ_MAX_TCS)
p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
}
spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
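/* Illustrative sketch (not part of dcbnl.c): collapsing the per-DSCP
 * priority masks into one priority per code point by taking the highest set
 * bit. Picking the highest priority is an assumed driver policy, not
 * something this file mandates; the function name is hypothetical.
 */
static void example_resolve_dscp_prio(const struct net_device *dev,
				      u8 prio_of_dscp[64])
{
	struct dcb_ieee_app_dscp_map map;
	int dscp;

	dcb_ieee_getapp_dscp_prio_mask_map(dev, &map);
	for (dscp = 0; dscp < 64; dscp++)
		prio_of_dscp[dscp] = map.map[dscp] ? fls(map.map[dscp]) - 1 : 0;
}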
/*
* Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
* type, with valid PID values >= 1536. A special meaning is then assigned to
* protocol value of 0: "default priority. For use when priority is not
* otherwise specified".
*
* dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
* of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
* priorities set by these entries.
*/
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
{
int ifindex = dev->ifindex;
struct dcb_app_type *itr;
u8 mask = 0;
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == ifindex &&
itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
itr->app.protocol == 0 &&
itr->app.priority < IEEE_8021QAZ_MAX_TCS)
mask |= 1 << itr->app.priority;
}
spin_unlock_bh(&dcb_lock);
return mask;
}
EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
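/* Illustrative sketch (not part of dcbnl.c): installing a default-priority
 * APP entry ({priority, ETHERTYPE, protocol 0}, per the 802.1Q convention
 * described above) and reading the resulting mask back. The function name
 * is hypothetical.
 */
static u8 example_default_prio(struct net_device *dev)
{
	struct dcb_app def = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = 0,	/* protocol 0 == "default priority" */
		.priority = 4,
	};
	int err = dcb_ieee_setapp(dev, &def);

	if (err && err != -EEXIST)
		return 0;

	return dcb_ieee_getapp_default_prio_mask(dev);	/* bit 4 is now set */
}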
static void dcbnl_flush_dev(struct net_device *dev)
{
struct dcb_app_type *itr, *tmp;
spin_lock_bh(&dcb_lock);
list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
if (itr->ifindex == dev->ifindex) {
list_del(&itr->list);
kfree(itr);
}
}
spin_unlock_bh(&dcb_lock);
}
static int dcbnl_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_UNREGISTER:
if (!dev->dcbnl_ops)
return NOTIFY_DONE;
dcbnl_flush_dev(dev);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static struct notifier_block dcbnl_nb __read_mostly = {
.notifier_call = dcbnl_netdevice_event,
};
static int __init dcbnl_init(void)
{
int err;
err = register_netdevice_notifier(&dcbnl_nb);
if (err)
return err;
rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
return 0;
}
device_initcall(dcbnl_init);
| linux-master | net/dcb/dcbnl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
*
* Copyright (c) 2012 Massachusetts Institute of Technology
*
* Adapted from code in net/8021q/vlan_gvrp.c
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/mrp.h>
#include "vlan.h"
#define MRP_MVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
enum mvrp_attributes {
MVRP_ATTR_INVALID,
MVRP_ATTR_VID,
__MVRP_ATTR_MAX
};
#define MVRP_ATTR_MAX (__MVRP_ATTR_MAX - 1)
static struct mrp_application vlan_mrp_app __read_mostly = {
.type = MRP_APPLICATION_MVRP,
.maxattr = MVRP_ATTR_MAX,
.pkttype.type = htons(ETH_P_MVRP),
.group_address = MRP_MVRP_ADDRESS,
.version = 0,
};
int vlan_mvrp_request_join(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}
void vlan_mvrp_request_leave(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}
int vlan_mvrp_init_applicant(struct net_device *dev)
{
return mrp_init_applicant(dev, &vlan_mrp_app);
}
void vlan_mvrp_uninit_applicant(struct net_device *dev)
{
mrp_uninit_applicant(dev, &vlan_mrp_app);
}
int __init vlan_mvrp_init(void)
{
return mrp_register_application(&vlan_mrp_app);
}
void vlan_mvrp_uninit(void)
{
mrp_unregister_application(&vlan_mrp_app);
}
| linux-master | net/8021q/vlan_mvrp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <[email protected]>
* Please send support related email to: [email protected]
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes:
* Fix for packet capture - Nick Eggleston <[email protected]>;
* Add HW acceleration hooks - David S. Miller <[email protected]>;
* Correct all the locking - David S. Miller <[email protected]>;
* Use hash table for VLAN groups - David S. Miller <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"
#define DRV_VERSION "1.8"
/* Global VLAN variables */
unsigned int vlan_net_id __read_mostly;
const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
/* End of global variables definitions. */
static int vlan_group_prealloc_vid(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id)
{
struct net_device **array;
unsigned int vidx;
unsigned int size;
int pidx;
ASSERT_RTNL();
pidx = vlan_proto_idx(vlan_proto);
if (pidx < 0)
return -EINVAL;
vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
array = vg->vlan_devices_arrays[pidx][vidx];
if (array != NULL)
return 0;
size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
array = kzalloc(size, GFP_KERNEL_ACCOUNT);
if (array == NULL)
return -ENOBUFS;
/* paired with smp_rmb() in __vlan_group_get_device() */
smp_wmb();
vg->vlan_devices_arrays[pidx][vidx] = array;
return 0;
}
static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev,
struct vlan_dev_priv *vlan)
{
if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
netif_stacked_transfer_operstate(rootdev, dev);
}
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
struct vlan_info *vlan_info;
struct vlan_group *grp;
u16 vlan_id = vlan->vlan_id;
ASSERT_RTNL();
vlan_info = rtnl_dereference(real_dev->vlan_info);
BUG_ON(!vlan_info);
grp = &vlan_info->grp;
grp->nr_vlan_devs--;
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_leave(dev);
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
netdev_upper_dev_unlink(real_dev, dev);
/* Because unregister_netdevice_queue() makes sure at least one rcu
* grace period is respected before device freeing,
 * we don't need to call synchronize_net() here.
*/
unregister_netdevice_queue(dev, head);
if (grp->nr_vlan_devs == 0) {
vlan_mvrp_uninit_applicant(real_dev);
vlan_gvrp_uninit_applicant(real_dev);
}
vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
}
int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id,
struct netlink_ext_ack *extack)
{
const char *name = real_dev->name;
if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_info("VLANs not supported on %s\n", name);
NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
return -EOPNOTSUPP;
}
if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
return -EEXIST;
}
return 0;
}
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
u16 vlan_id = vlan->vlan_id;
struct vlan_info *vlan_info;
struct vlan_group *grp;
int err;
err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
if (err)
return err;
vlan_info = rtnl_dereference(real_dev->vlan_info);
/* vlan_info should be there now. vlan_vid_add took care of it */
BUG_ON(!vlan_info);
grp = &vlan_info->grp;
if (grp->nr_vlan_devs == 0) {
err = vlan_gvrp_init_applicant(real_dev);
if (err < 0)
goto out_vid_del;
err = vlan_mvrp_init_applicant(real_dev);
if (err < 0)
goto out_uninit_gvrp;
}
err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
if (err < 0)
goto out_uninit_mvrp;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err)
goto out_unregister_netdev;
vlan_stacked_transfer_operstate(real_dev, dev, vlan);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
/* So, got the sucker initialized, now let's place
* it into our local structure.
*/
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
grp->nr_vlan_devs++;
return 0;
out_unregister_netdev:
unregister_netdevice(dev);
out_uninit_mvrp:
if (grp->nr_vlan_devs == 0)
vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
return err;
}
/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
* Returns 0 if the device was created or a negative error code otherwise.
*/
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
struct vlan_dev_priv *vlan;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
int err;
if (vlan_id >= VLAN_VID_MASK)
return -ERANGE;
err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
NULL);
if (err < 0)
return err;
/* Gotta set up the fields for the device. */
switch (vn->name_type) {
case VLAN_NAME_TYPE_RAW_PLUS_VID:
/* name will look like: eth1.0005 */
snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: vlan5
*/
snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
break;
case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: eth0.5
*/
snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID:
/* Put our vlan.VID in the name.
* Name will look like: vlan0005
*/
default:
snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
}
new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
NET_NAME_UNKNOWN, vlan_setup);
if (new_dev == NULL)
return -ENOBUFS;
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
*/
new_dev->mtu = real_dev->mtu;
vlan = vlan_dev_priv(new_dev);
vlan->vlan_proto = htons(ETH_P_8021Q);
vlan->vlan_id = vlan_id;
vlan->real_dev = real_dev;
vlan->dent = NULL;
vlan->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev, NULL);
if (err < 0)
goto out_free_newdev;
return 0;
out_free_newdev:
free_netdev(new_dev);
return err;
}
static void vlan_sync_address(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
/* May be called without an actual change */
if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
/* vlan continues to inherit address of lower device */
if (vlan_dev_inherit_address(vlandev, dev))
goto out;
/* vlan address was different from the old address and is equal to
* the new address */
if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
!ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
out:
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
netif_inherit_tso_max(vlandev, dev);
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
#if IS_ENABLED(CONFIG_FCOE)
vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);
netdev_update_features(vlandev);
}
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
int err = 0;
switch (event) {
case NETDEV_CHANGENAME:
vlan_proc_rem_dev(dev);
err = vlan_proc_add_dev(dev);
break;
case NETDEV_REGISTER:
err = vlan_proc_add_dev(dev);
break;
case NETDEV_UNREGISTER:
vlan_proc_rem_dev(dev);
break;
}
return err;
}
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vlan_group *grp;
struct vlan_info *vlan_info;
int i, flgs;
struct net_device *vlandev;
struct vlan_dev_priv *vlan;
bool last = false;
LIST_HEAD(list);
int err;
if (is_vlan_dev(dev)) {
int err = __vlan_device_event(dev, event);
if (err)
return notifier_from_errno(err);
}
if ((event == NETDEV_UP) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name);
vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
}
if (event == NETDEV_DOWN &&
(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
goto out;
grp = &vlan_info->grp;
/* It is OK that we do not hold the group lock right now,
* as we run under the RTNL lock.
*/
switch (event) {
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
vlan_stacked_transfer_operstate(dev, vlandev,
vlan_dev_priv(vlandev));
break;
case NETDEV_CHANGEADDR:
/* Adjust unicast filters on underlying device */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
vlan_sync_address(dev, vlandev);
}
break;
case NETDEV_CHANGEMTU:
vlan_group_for_each_dev(grp, i, vlandev) {
if (vlandev->mtu <= dev->mtu)
continue;
dev_set_mtu(vlandev, dev->mtu);
}
break;
case NETDEV_FEAT_CHANGE:
/* Propagate the real device's features to the vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
vlan_transfer_features(dev, vlandev);
break;
case NETDEV_DOWN: {
struct net_device *tmp;
LIST_HEAD(close_list);
/* Put all VLANs for this dev in the down state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
list_add(&vlandev->close_list, &close_list);
}
dev_close_many(&close_list, false);
list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
vlan_stacked_transfer_operstate(dev, vlandev,
vlan_dev_priv(vlandev));
list_del_init(&vlandev->close_list);
}
list_del(&close_list);
break;
}
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = dev_get_flags(vlandev);
if (flgs & IFF_UP)
continue;
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
dev_change_flags(vlandev, flgs | IFF_UP,
extack);
vlan_stacked_transfer_operstate(dev, vlandev, vlan);
}
break;
case NETDEV_UNREGISTER:
/* twiddle thumbs on netns device moves */
if (dev->reg_state != NETREG_UNREGISTERING)
break;
vlan_group_for_each_dev(grp, i, vlandev) {
/* removal of the last vid destroys vlan_info, abort
 * afterwards */
if (vlan_info->nr_vids == 1)
last = true;
unregister_vlan_dev(vlandev, &list);
if (last)
break;
}
unregister_netdevice_many(&list);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid the underlying device from changing its type. */
if (vlan_uses_dev(dev))
return NOTIFY_BAD;
break;
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
break;
case NETDEV_CVLAN_FILTER_PUSH_INFO:
err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
if (err)
return notifier_from_errno(err);
break;
case NETDEV_CVLAN_FILTER_DROP_INFO:
vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
break;
case NETDEV_SVLAN_FILTER_PUSH_INFO:
err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
if (err)
return notifier_from_errno(err);
break;
case NETDEV_SVLAN_FILTER_DROP_INFO:
vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
break;
}
out:
return NOTIFY_DONE;
}
static struct notifier_block vlan_notifier_block __read_mostly = {
.notifier_call = vlan_device_event,
};
/*
* VLAN IOCTL handler.
* o execute requested action or pass command to the device driver
* arg is really a struct vlan_ioctl_args __user *.
*/
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
int err;
struct vlan_ioctl_args args;
struct net_device *dev = NULL;
if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
return -EFAULT;
/* Null terminate this sucker, just in case. */
args.device1[sizeof(args.device1) - 1] = 0;
args.u.device2[sizeof(args.u.device2) - 1] = 0;
rtnl_lock();
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
case SET_VLAN_EGRESS_PRIORITY_CMD:
case SET_VLAN_FLAG_CMD:
case ADD_VLAN_CMD:
case DEL_VLAN_CMD:
case GET_VLAN_REALDEV_NAME_CMD:
case GET_VLAN_VID_CMD:
err = -ENODEV;
dev = __dev_get_by_name(net, args.device1);
if (!dev)
goto out;
err = -EINVAL;
if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
goto out;
}
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
vlan_dev_set_ingress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
err = 0;
break;
case SET_VLAN_EGRESS_PRIORITY_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = vlan_dev_set_egress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
break;
case SET_VLAN_FLAG_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = vlan_dev_change_flags(dev,
args.vlan_qos ? args.u.flag : 0,
args.u.flag);
break;
case SET_VLAN_NAME_TYPE_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
struct vlan_net *vn;
vn = net_generic(net, vlan_net_id);
vn->name_type = args.u.name_type;
err = 0;
} else {
err = -EINVAL;
}
break;
case ADD_VLAN_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = register_vlan_device(dev, args.u.VID);
break;
case DEL_VLAN_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
unregister_vlan_dev(dev, NULL);
err = 0;
break;
case GET_VLAN_REALDEV_NAME_CMD:
err = 0;
vlan_dev_get_realdev_name(dev, args.u.device2,
sizeof(args.u.device2));
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
case GET_VLAN_VID_CMD:
err = 0;
args.u.VID = vlan_dev_vlan_id(dev);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
default:
err = -EOPNOTSUPP;
break;
}
out:
rtnl_unlock();
return err;
}
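/* Illustrative user-space sketch (not kernel code) of driving this handler:
 * the request reaches vlan_ioctl_handler() through the SIOCSIFVLAN ioctl,
 * whose number and argument layout come from the UAPI headers rather than
 * this file. With the default name type set below in vlan_init_net(), this
 * creates "eth0.100".
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_vlan.h>
 *	#include <linux/sockios.h>
 *
 *	int add_vlan_100(void)
 *	{
 *		struct vlan_ioctl_args args = { 0 };
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		args.cmd = ADD_VLAN_CMD;
 *		strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
 *		args.u.VID = 100;
 *		return ioctl(fd, SIOCSIFVLAN, &args);
 *	}
 */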
static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
int err;
vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
err = vlan_proc_init(net);
return err;
}
static void __net_exit vlan_exit_net(struct net *net)
{
vlan_proc_cleanup(net);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
.exit = vlan_exit_net,
.id = &vlan_net_id,
.size = sizeof(struct vlan_net),
};
static int __init vlan_proto_init(void)
{
int err;
pr_info("%s v%s\n", vlan_fullname, vlan_version);
err = register_pernet_subsys(&vlan_net_ops);
if (err < 0)
goto err0;
err = register_netdevice_notifier(&vlan_notifier_block);
if (err < 0)
goto err2;
err = vlan_gvrp_init();
if (err < 0)
goto err3;
err = vlan_mvrp_init();
if (err < 0)
goto err4;
err = vlan_netlink_init();
if (err < 0)
goto err5;
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
err5:
vlan_mvrp_uninit();
err4:
vlan_gvrp_uninit();
err3:
unregister_netdevice_notifier(&vlan_notifier_block);
err2:
unregister_pernet_subsys(&vlan_net_ops);
err0:
return err;
}
static void __exit vlan_cleanup_module(void)
{
vlan_ioctl_set(NULL);
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
unregister_pernet_subsys(&vlan_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
vlan_mvrp_uninit();
vlan_gvrp_uninit();
}
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | net/8021q/vlan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
*
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <net/garp.h>
#include "vlan.h"
#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
enum gvrp_attributes {
GVRP_ATTR_INVALID,
GVRP_ATTR_VID,
__GVRP_ATTR_MAX
};
#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1)
static struct garp_application vlan_gvrp_app __read_mostly = {
.proto.group_address = GARP_GVRP_ADDRESS,
.maxattr = GVRP_ATTR_MAX,
.type = GARP_APPLICATION_GVRP,
};
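/* GVRP registration only applies to 802.1Q-tagged VLANs; for 802.1ad (QinQ)
* devices the join/leave requests below are silently skipped.
*/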
int vlan_gvrp_request_join(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
void vlan_gvrp_request_leave(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
int vlan_gvrp_init_applicant(struct net_device *dev)
{
return garp_init_applicant(dev, &vlan_gvrp_app);
}
void vlan_gvrp_uninit_applicant(struct net_device *dev)
{
garp_uninit_applicant(dev, &vlan_gvrp_app);
}
int __init vlan_gvrp_init(void)
{
return garp_register_application(&vlan_gvrp_app);
}
void vlan_gvrp_uninit(void)
{
garp_unregister_application(&vlan_gvrp_app);
}
| linux-master | net/8021q/vlan_gvrp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
__be16 vlan_proto = skb->vlan_proto;
u16 vlan_id = skb_vlan_tag_get_id(skb);
struct net_device *vlan_dev;
struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
if (!vlan_dev)
return false;
skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return false;
if (unlikely(!(vlan_dev->flags & IFF_UP))) {
kfree_skb(skb);
*skbp = NULL;
return false;
}
skb->dev = vlan_dev;
if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
/* Our lower layer thinks this is not local; let's make sure.
* This allows the VLAN to have a different MAC address than the
* underlying device, and still route correctly. */
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
}
if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
!netif_is_macvlan_port(vlan_dev) &&
!netif_is_bridge_port(vlan_dev)) {
unsigned int offset = skb->data - skb_mac_header(skb);
/*
* vlan_insert_inner_tag() expects skb->data to point to the MAC header,
* so move skb->data there before calling it and restore the original
* position afterwards.
*/
skb_push(skb, offset);
skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
skb->vlan_tci, skb->mac_len);
if (!skb)
return false;
skb_pull(skb, offset + VLAN_HLEN);
skb_reset_mac_len(skb);
}
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
__vlan_hwaccel_clear_tag(skb);
rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
u64_stats_update_begin(&rx_stats->syncp);
u64_stats_inc(&rx_stats->rx_packets);
u64_stats_add(&rx_stats->rx_bytes, skb->len);
if (skb->pkt_type == PACKET_MULTICAST)
u64_stats_inc(&rx_stats->rx_multicast);
u64_stats_update_end(&rx_stats->syncp);
return true;
}
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
__be16 vlan_proto, u16 vlan_id)
{
struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
if (vlan_info) {
return vlan_group_get_device(&vlan_info->grp,
vlan_proto, vlan_id);
} else {
/*
* Lower devices of master uppers (bonding, team) do not have
* a grp assigned to themselves; the grp is assigned to the
* upper device instead.
*/
struct net_device *upper_dev;
upper_dev = netdev_master_upper_dev_get_rcu(dev);
if (upper_dev)
return __vlan_find_dev_deep_rcu(upper_dev,
vlan_proto, vlan_id);
}
return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
struct net_device *ret = vlan_dev_priv(dev)->real_dev;
while (is_vlan_dev(ret))
ret = vlan_dev_priv(ret)->real_dev;
return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);
/*
* vlan info and vid list
*/
static void vlan_group_free(struct vlan_group *grp)
{
int i, j;
for (i = 0; i < VLAN_PROTO_NUM; i++)
for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
kfree(grp->vlan_devices_arrays[i][j]);
}
static void vlan_info_free(struct vlan_info *vlan_info)
{
vlan_group_free(&vlan_info->grp);
kfree(vlan_info);
}
static void vlan_info_rcu_free(struct rcu_head *rcu)
{
vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}
static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
struct vlan_info *vlan_info;
vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
if (!vlan_info)
return NULL;
vlan_info->real_dev = dev;
INIT_LIST_HEAD(&vlan_info->vid_list);
return vlan_info;
}
struct vlan_vid_info {
struct list_head list;
__be16 proto;
u16 vid;
int refcount;
};
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
if (proto == htons(ETH_P_8021Q) &&
dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
return true;
if (proto == htons(ETH_P_8021AD) &&
dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
return true;
return false;
}
static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
__be16 proto, u16 vid)
{
struct vlan_vid_info *vid_info;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
if (vid_info->proto == proto && vid_info->vid == vid)
return vid_info;
}
return NULL;
}
static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
struct vlan_vid_info *vid_info;
vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
if (!vid_info)
return NULL;
vid_info->proto = proto;
vid_info->vid = vid;
return vid_info;
}
static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
if (!vlan_hw_filter_capable(dev, proto))
return 0;
if (netif_device_present(dev))
return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
else
return -ENODEV;
}
static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
if (!vlan_hw_filter_capable(dev, proto))
return 0;
if (netif_device_present(dev))
return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
else
return -ENODEV;
}
int vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid, void *arg),
void *arg)
{
struct vlan_vid_info *vid_info;
struct vlan_info *vlan_info;
struct net_device *vdev;
int ret;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
vid_info->vid);
ret = action(vdev, vid_info->vid, arg);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(vlan_for_each);
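/* Re-program every VID of the given protocol into the real device's hardware
* filter.  If one add fails, walk back over the list and remove the entries
* that were already programmed.
*/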
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
struct net_device *real_dev = vlan_info->real_dev;
struct vlan_vid_info *vlan_vid_info;
int err;
list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
if (vlan_vid_info->proto == proto) {
err = vlan_add_rx_filter_info(real_dev, proto,
vlan_vid_info->vid);
if (err)
goto unwind;
}
}
return 0;
unwind:
list_for_each_entry_continue_reverse(vlan_vid_info,
&vlan_info->vid_list, list) {
if (vlan_vid_info->proto == proto)
vlan_kill_rx_filter_info(real_dev, proto,
vlan_vid_info->vid);
}
return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);
void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
struct vlan_vid_info *vlan_vid_info;
list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
if (vlan_vid_info->proto == proto)
vlan_kill_rx_filter_info(vlan_info->real_dev,
vlan_vid_info->proto,
vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
struct vlan_vid_info **pvid_info)
{
struct net_device *dev = vlan_info->real_dev;
struct vlan_vid_info *vid_info;
int err;
vid_info = vlan_vid_info_alloc(proto, vid);
if (!vid_info)
return -ENOMEM;
err = vlan_add_rx_filter_info(dev, proto, vid);
if (err) {
kfree(vid_info);
return err;
}
list_add(&vid_info->list, &vlan_info->vid_list);
vlan_info->nr_vids++;
*pvid_info = vid_info;
return 0;
}
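/* Refcounted VID bookkeeping shared with other users of the hardware VLAN
* filter (e.g. bridge and bonding): the first add of a proto/vid pair
* programs the filter, subsequent adds only bump the refcount.
*/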
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info;
bool vlan_info_created = false;
int err;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info) {
vlan_info = vlan_info_alloc(dev);
if (!vlan_info)
return -ENOMEM;
vlan_info_created = true;
}
vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info) {
err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
if (err)
goto out_free_vlan_info;
}
vid_info->refcount++;
if (vlan_info_created)
rcu_assign_pointer(dev->vlan_info, vlan_info);
return 0;
out_free_vlan_info:
if (vlan_info_created)
kfree(vlan_info);
return err;
}
EXPORT_SYMBOL(vlan_vid_add);
static void __vlan_vid_del(struct vlan_info *vlan_info,
struct vlan_vid_info *vid_info)
{
struct net_device *dev = vlan_info->real_dev;
__be16 proto = vid_info->proto;
u16 vid = vid_info->vid;
int err;
err = vlan_kill_rx_filter_info(dev, proto, vid);
if (err && dev->reg_state != NETREG_UNREGISTERING)
netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);
list_del(&vid_info->list);
kfree(vid_info);
vlan_info->nr_vids--;
}
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
return;
vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info)
return;
vid_info->refcount--;
if (vid_info->refcount == 0) {
__vlan_vid_del(vlan_info, vid_info);
if (vlan_info->nr_vids == 0) {
RCU_INIT_POINTER(dev->vlan_info, NULL);
call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
}
}
}
EXPORT_SYMBOL(vlan_vid_del);
int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
struct vlan_vid_info *vid_info;
struct vlan_info *vlan_info;
int err;
ASSERT_RTNL();
vlan_info = rtnl_dereference(by_dev->vlan_info);
if (!vlan_info)
return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
if (err)
goto unwind;
}
return 0;
unwind:
list_for_each_entry_continue_reverse(vid_info,
&vlan_info->vid_list,
list) {
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
struct vlan_vid_info *vid_info;
struct vlan_info *vlan_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(by_dev->vlan_info);
if (!vlan_info)
return;
list_for_each_entry(vid_info, &vlan_info->vid_list, list)
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
bool vlan_uses_dev(const struct net_device *dev)
{
struct vlan_info *vlan_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
return false;
return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
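/* GRO receive handler for 802.1Q/802.1ad tagged frames: peel the VLAN
* header, require an identical VLAN header for packets to stay in the same
* flow, and hand the inner protocol to its own GRO receive callback.
*/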
static struct sk_buff *vlan_gro_receive(struct list_head *head,
struct sk_buff *skb)
{
const struct packet_offload *ptype;
unsigned int hlen, off_vlan;
struct sk_buff *pp = NULL;
struct vlan_hdr *vhdr;
struct sk_buff *p;
__be16 type;
int flush = 1;
off_vlan = skb_gro_offset(skb);
hlen = off_vlan + sizeof(*vhdr);
vhdr = skb_gro_header(skb, hlen, off_vlan);
if (unlikely(!vhdr))
goto out;
type = vhdr->h_vlan_encapsulated_proto;
ptype = gro_find_receive_by_type(type);
if (!ptype)
goto out;
flush = 0;
list_for_each_entry(p, head, list) {
struct vlan_hdr *vhdr2;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
if (compare_vlan_header(vhdr, vhdr2))
NAPI_GRO_CB(p)->same_flow = 0;
}
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
ipv6_gro_receive, inet_gro_receive,
head, skb);
out:
skb_gro_flush_final(skb, pp, flush);
return pp;
}
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
__be16 type = vhdr->h_vlan_encapsulated_proto;
struct packet_offload *ptype;
int err = -ENOENT;
ptype = gro_find_complete_by_type(type);
if (ptype)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*vhdr));
return err;
}
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
{
.type = cpu_to_be16(ETH_P_8021Q),
.priority = 10,
.callbacks = {
.gro_receive = vlan_gro_receive,
.gro_complete = vlan_gro_complete,
},
},
{
.type = cpu_to_be16(ETH_P_8021AD),
.priority = 10,
.callbacks = {
.gro_receive = vlan_gro_receive,
.gro_complete = vlan_gro_complete,
},
},
};
static int __init vlan_offload_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
dev_add_offload(&vlan_packet_offloads[i]);
return 0;
}
fs_initcall(vlan_offload_init);
| linux-master | net/8021q/vlan_core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
* vlanproc.c VLAN Module. /proc filesystem interface.
*
* This module is completely hardware-independent and provides
* access to the router using the Linux /proc filesystem.
*
* Author: Ben Greear, <[email protected]> copied from wanproc.c
* by: Gene Kozin <[email protected]>
*
* Copyright: (c) 1998 Ben Greear
*
* ============================================================================
* Jan 20, 1998 Ben Greear Initial Version
*****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "vlanproc.h"
#include "vlan.h"
/****** Function Prototypes *************************************************/
/* Methods for preparing data for reading proc entries */
static int vlan_seq_show(struct seq_file *seq, void *v);
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
static void vlan_seq_stop(struct seq_file *seq, void *);
static int vlandev_seq_show(struct seq_file *seq, void *v);
/*
* Global Data
*/
/*
* Names of the proc directory entries
*/
static const char name_root[] = "vlan";
static const char name_conf[] = "config";
/*
* Structures for interfacing with the /proc filesystem.
* VLAN creates its own directory /proc/net/vlan with the following
* entries:
* config device status/configuration
* <device> entry for each device
*/
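/* For example (the VLAN device name is illustrative only):
*
*	/proc/net/vlan/config
*	/proc/net/vlan/eth0.100
*/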
/*
* Generic /proc/net/vlan/<file> file and inode operations
*/
static const struct seq_operations vlan_seq_ops = {
.start = vlan_seq_start,
.next = vlan_seq_next,
.stop = vlan_seq_stop,
.show = vlan_seq_show,
};
/*
* Proc filesystem directory entries.
*/
/* Strings */
static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
[VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
[VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID",
};
/*
* Interface functions
*/
/*
* Clean up /proc/net/vlan entries
*/
void vlan_proc_cleanup(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (vn->proc_vlan_conf)
remove_proc_entry(name_conf, vn->proc_vlan_dir);
if (vn->proc_vlan_dir)
remove_proc_entry(name_root, net->proc_net);
/* Dynamically added entries should be cleaned up as their vlan_device
* is removed, so we should not have to take care of it here...
*/
}
/*
* Create /proc/net/vlan entries
*/
int __net_init vlan_proc_init(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
if (!vn->proc_vlan_dir)
goto err;
vn->proc_vlan_conf = proc_create_net(name_conf, S_IFREG | 0600,
vn->proc_vlan_dir, &vlan_seq_ops,
sizeof(struct seq_net_private));
if (!vn->proc_vlan_conf)
goto err;
return 0;
err:
pr_err("can't create entry in proc filesystem!\n");
vlan_proc_cleanup(net);
return -ENOBUFS;
}
/*
* Add directory entry for VLAN device.
*/
int vlan_proc_add_dev(struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
if (!strcmp(vlandev->name, name_conf))
return -EINVAL;
vlan->dent = proc_create_single_data(vlandev->name, S_IFREG | 0600,
vn->proc_vlan_dir, vlandev_seq_show, vlandev);
if (!vlan->dent)
return -ENOBUFS;
return 0;
}
/*
* Delete directory entry for VLAN device.
*/
void vlan_proc_rem_dev(struct net_device *vlandev)
{
/** NOTE: This will consume the memory pointed to by dent, it seems. */
proc_remove(vlan_dev_priv(vlandev)->dent);
vlan_dev_priv(vlandev)->dent = NULL;
}
/****** Proc filesystem entry points ****************************************/
/*
* The following few functions build the content of /proc/net/vlan/config
*/
/* start read of /proc/net/vlan/config */
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
loff_t i = 1;
rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
for_each_netdev_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
if (i++ == *pos)
return dev;
}
return NULL;
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
++*pos;
dev = v;
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&net->dev_base_head);
for_each_netdev_continue_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
return dev;
}
return NULL;
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
rcu_read_unlock();
}
static int vlan_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (v == SEQ_START_TOKEN) {
const char *nmtype = NULL;
seq_puts(seq, "VLAN Dev name | VLAN ID\n");
if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
nmtype = vlan_name_type_str[vn->name_type];
seq_printf(seq, "Name-Type: %s\n",
nmtype ? nmtype : "UNKNOWN");
} else {
const struct net_device *vlandev = v;
const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
vlan->vlan_id, vlan->real_dev->name);
}
return 0;
}
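/* Sample /proc/net/vlan/config output produced by vlan_seq_show() above
* (device names and VID are illustrative only):
*
*	VLAN Dev name | VLAN ID
*	Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
*	eth0.100       | 100 | eth0
*/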
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
struct net_device *vlandev = (struct net_device *) seq->private;
const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
static const char fmt64[] = "%30s %12llu\n";
int i;
if (!is_vlan_dev(vlandev))
return 0;
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
"%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n",
vlandev->name, vlan->vlan_id,
(int)(vlan->flags & 1), vlandev->priv_flags);
seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
seq_puts(seq, "\n");
seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
seq_printf(seq, "Device: %s", vlan->real_dev->name);
/* now show all PRIORITY mappings relating to this VLAN */
seq_printf(seq, "\nINGRESS priority mappings: "
"0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
vlan->ingress_priority_map[0],
vlan->ingress_priority_map[1],
vlan->ingress_priority_map[2],
vlan->ingress_priority_map[3],
vlan->ingress_priority_map[4],
vlan->ingress_priority_map[5],
vlan->ingress_priority_map[6],
vlan->ingress_priority_map[7]);
seq_printf(seq, " EGRESS priority mappings: ");
for (i = 0; i < 16; i++) {
const struct vlan_priority_tci_mapping *mp
= vlan->egress_priority_map[i];
while (mp) {
seq_printf(seq, "%u:%d ",
mp->priority, ((mp->vlan_qos >> 13) & 0x7));
mp = mp->next;
}
}
seq_puts(seq, "\n");
return 0;
}
| linux-master | net/8021q/vlanproc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*-
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <[email protected]>
* Please send support related email to: [email protected]
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes: Mar 22 2001: Martin Bokaemper <[email protected]>
* - reset skb->pkt_type on incoming packets when MAC was changed
* - see that changed MAC is saddr for outgoing packets
* Oct 20, 2001: Ard van Breeman:
* - Fix MC-list, finally.
* - Flush MC-list on VLAN destroy.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <net/arp.h>
#include <net/macsec.h>
#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
/*
* Create the VLAN header for an arbitrary protocol layer
*
* saddr=NULL means use the device source address
* daddr=NULL means leave the destination address unset (e.g. unresolved ARP)
*
* This is called when the SKB is moving down the stack towards the
* physical devices.
*/
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_hdr *vhdr;
unsigned int vhdrlen = 0;
u16 vlan_tci = 0;
int rc;
if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = skb_push(skb, VLAN_HLEN);
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
vhdr->h_vlan_TCI = htons(vlan_tci);
/*
* Set the protocol type. For a packet of type ETH_P_802_3/2 we
* put the length in here instead.
*/
if (type != ETH_P_802_3 && type != ETH_P_802_2)
vhdr->h_vlan_encapsulated_proto = htons(type);
else
vhdr->h_vlan_encapsulated_proto = htons(len);
skb->protocol = vlan->vlan_proto;
type = ntohs(vlan->vlan_proto);
vhdrlen = VLAN_HLEN;
}
/* Before delegating work to the lower layer, fill in our MAC address */
if (saddr == NULL)
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
dev = vlan->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
return rc;
}
static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
return netpoll_send_skb(vlan->netpoll, skb);
#else
BUG();
return NETDEV_TX_OK;
#endif
}
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
*
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
veth->h_vlan_proto != vlan->vlan_proto) {
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
}
skb->dev = vlan->real_dev;
len = skb->len;
if (unlikely(netpoll_tx_running(dev)))
return vlan_netpoll_send_skb(vlan, skb);
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *stats;
stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->tx_packets);
u64_stats_add(&stats->tx_bytes, len);
u64_stats_update_end(&stats->syncp);
} else {
this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
}
return ret;
}
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
unsigned int max_mtu = real_dev->mtu;
if (netif_reduces_vlan_mtu(real_dev))
max_mtu -= VLAN_HLEN;
if (max_mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
return 0;
}
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
vlan->nr_ingress_mappings--;
else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
vlan->nr_ingress_mappings++;
vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}
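/* Egress mappings live in a 16-bucket hash of singly linked lists keyed by
* skb->priority.  The xmit path walks them locklessly, hence the smp_wmb()
* below before a new entry is published.
*/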
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *mp = NULL;
struct vlan_priority_tci_mapping *np;
u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* See if a priority mapping already exists. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
while (mp) {
if (mp->priority == skb_prio) {
if (mp->vlan_qos && !vlan_qos)
vlan->nr_egress_mappings--;
else if (!mp->vlan_qos && vlan_qos)
vlan->nr_egress_mappings++;
mp->vlan_qos = vlan_qos;
return 0;
}
mp = mp->next;
}
/* Create a new mapping then. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
if (!np)
return -ENOBUFS;
np->next = mp;
np->priority = skb_prio;
np->vlan_qos = vlan_qos;
/* Before inserting this element into the hash table, make sure all its
* fields are committed to memory.
* This pairs with the smp_rmb() in vlan_dev_get_egress_qos_mask().
*/
smp_wmb();
vlan->egress_priority_map[skb_prio & 0xF] = np;
if (vlan_qos)
vlan->nr_egress_mappings++;
return 0;
}
/* Flags are defined in the vlan_flags enum in
* include/uapi/linux/if_vlan.h file.
*/
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
u32 old_flags = vlan->flags;
if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
VLAN_FLAG_BRIDGE_BINDING))
return -EINVAL;
vlan->flags = (old_flags & ~mask) | (flags & mask);
if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
else
vlan_gvrp_request_leave(dev);
}
if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_join(dev);
else
vlan_mvrp_request_leave(dev);
}
return 0;
}
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result, size_t size)
{
strscpy_pad(result, vlan_dev_priv(dev)->real_dev->name, size);
}
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev)
{
if (dev->addr_assign_type != NET_ADDR_STOLEN)
return false;
eth_hw_addr_set(dev, real_dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return true;
}
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
int err;
if (!(real_dev->flags & IFF_UP) &&
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
!vlan_dev_inherit_address(dev, real_dev)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
}
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(real_dev, 1);
if (err < 0)
goto del_unicast;
}
if (dev->flags & IFF_PROMISC) {
err = dev_set_promiscuity(real_dev, 1);
if (err < 0)
goto clear_allmulti;
}
ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_join(dev);
if (netif_carrier_ok(real_dev) &&
!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
netif_carrier_on(dev);
return 0;
clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
del_unicast:
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
return err;
}
static int vlan_dev_stop(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
netif_carrier_off(dev);
return 0;
}
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
struct sockaddr *addr = p;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (!(dev->flags & IFF_UP))
goto out;
if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
static int vlan_hwtstamp_get(struct net_device *dev,
struct kernel_hwtstamp_config *cfg)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
return generic_hwtstamp_get_lower(real_dev, cfg);
}
static int vlan_hwtstamp_set(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
if (!net_eq(dev_net(dev), dev_net(real_dev)))
return -EOPNOTSUPP;
return generic_hwtstamp_set_lower(real_dev, cfg, extack);
}
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
struct ifreq ifrr;
int err = -EOPNOTSUPP;
strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
if (netif_device_present(real_dev) && ops->ndo_eth_ioctl)
err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
break;
}
if (!err)
ifr->ifr_ifru = ifrr.ifr_ifru;
return err;
}
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int err = 0;
if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
err = ops->ndo_neigh_setup(real_dev, pa);
return err;
}
#if IS_ENABLED(CONFIG_FCOE)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
if (ops->ndo_fcoe_ddp_setup)
rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
return rc;
}
static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int len = 0;
if (ops->ndo_fcoe_ddp_done)
len = ops->ndo_fcoe_ddp_done(real_dev, xid);
return len;
}
static int vlan_dev_fcoe_enable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_enable)
rc = ops->ndo_fcoe_enable(real_dev);
return rc;
}
static int vlan_dev_fcoe_disable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_disable)
rc = ops->ndo_fcoe_disable(real_dev);
return rc;
}
static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
if (ops->ndo_fcoe_ddp_target)
rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
return rc;
}
#endif
#ifdef NETDEV_FCOE_WWNN
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_get_wwn)
rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
return rc;
}
#endif
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
}
}
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
/*
* VLAN network devices have devices nesting below them and are a special
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key vlan_netdev_xmit_lock_key;
static struct lock_class_key vlan_netdev_addr_lock_key;
static void vlan_dev_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *unused)
{
lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
}
static void vlan_dev_set_lockdep_class(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock,
&vlan_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
}
static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
return __vlan_get_protocol(skb, veth->h_vlan_proto, NULL);
}
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.parse = eth_header_parse,
.parse_protocol = vlan_parse_protocol,
};
static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
if (saddr == NULL)
saddr = dev->dev_addr;
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
static const struct header_ops vlan_passthru_header_ops = {
.create = vlan_passthru_hard_header,
.parse = eth_header_parse,
.parse_protocol = vlan_parse_protocol,
};
static struct device_type vlan_type = {
.name = "vlan",
};
static const struct net_device_ops vlan_netdev_ops;
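/* ndo_init: inherit flags, features, broadcast and (if still unset) the MAC
* address from the real device, pick passthru vs. software header_ops
* depending on hardware tag-insertion support, and allocate per-CPU stats.
*/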
static int vlan_dev_init(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
netif_carrier_off(dev);
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_MASTER | IFF_SLAVE);
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
dev->state |= (1 << __LINK_STATE_NOCARRIER);
dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
NETIF_F_ALL_FCOE;
if (real_dev->vlan_features & NETIF_F_HW_MACSEC)
dev->hw_features |= NETIF_F_HW_MACSEC;
dev->features |= dev->hw_features | NETIF_F_LLTX;
netif_inherit_tso_max(dev, real_dev);
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
dev->hw_enc_features = vlan_tnl_features(real_dev);
dev->mpls_features = real_dev->mpls_features;
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr)) {
eth_hw_addr_set(dev, real_dev->dev_addr);
dev->addr_assign_type = NET_ADDR_STOLEN;
}
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
#if IS_ENABLED(CONFIG_FCOE)
dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif
dev->needed_headroom = real_dev->needed_headroom;
if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
}
dev->netdev_ops = &vlan_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &vlan_type);
vlan_dev_set_lockdep_class(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;
/* Get vlan's reference to real_dev */
netdev_hold(real_dev, &vlan->dev_tracker, GFP_KERNEL);
return 0;
}
/* Note: this function might be called multiple times for the same device. */
void vlan_dev_free_egress_priority(const struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
kfree(pm);
}
}
}
static void vlan_dev_uninit(struct net_device *dev)
{
vlan_dev_free_egress_priority(dev);
}
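/* Restrict the VLAN device's features to what the real device can offer for
* VLAN traffic, while always keeping the software fallbacks (and LLTX) so
* that user-controlled toggles on the VLAN device keep working.
*/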
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
netdev_features_t old_features = features;
netdev_features_t lower_features;
lower_features = netdev_intersect_features((real_dev->vlan_features |
NETIF_F_RXCSUM),
real_dev->features);
/* Add HW_CSUM setting to preserve user ability to control
* checksum offload on the vlan device.
*/
if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
lower_features |= NETIF_F_HW_CSUM;
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
features |= NETIF_F_LLTX;
return features;
}
static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, vlan_fullname, sizeof(info->driver));
strscpy(info->version, vlan_version, sizeof(info->version));
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static int vlan_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
struct phy_device *phydev = vlan->real_dev->phydev;
if (phy_has_tsinfo(phydev)) {
return phy_ts_info(phydev, info);
} else if (ops->get_ts_info) {
return ops->get_ts_info(vlan->real_dev, info);
} else {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
}
return 0;
}
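/* Fold the per-CPU counters into a single rtnl_link_stats64, re-reading any
* CPU whose u64_stats sequence changed while the values were being copied.
*/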
static void vlan_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin(&p->syncp);
rxpackets = u64_stats_read(&p->rx_packets);
rxbytes = u64_stats_read(&p->rx_bytes);
rxmulticast = u64_stats_read(&p->rx_multicast);
txpackets = u64_stats_read(&p->tx_packets);
txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
stats->multicast += rxmulticast;
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
rx_errors += READ_ONCE(p->rx_errors);
tx_dropped += READ_ONCE(p->tx_dropped);
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vlan_dev_poll_controller(struct net_device *dev)
{
return;
}
static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
struct netpoll *netpoll;
int err = 0;
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
err = -ENOMEM;
if (!netpoll)
goto out;
err = __netpoll_setup(netpoll, real_dev);
if (err) {
kfree(netpoll);
goto out;
}
vlan->netpoll = netpoll;
out:
return err;
}
static void vlan_dev_netpoll_cleanup(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct netpoll *netpoll = vlan->netpoll;
if (!netpoll)
return;
vlan->netpoll = NULL;
__netpoll_free(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int vlan_dev_get_iflink(const struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
return real_dev->ifindex;
}
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev);
path->type = DEV_PATH_VLAN;
path->encap.id = vlan->vlan_id;
path->encap.proto = vlan->vlan_proto;
path->dev = ctx->dev;
ctx->dev = vlan->real_dev;
if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
return -ENOSPC;
ctx->vlan[ctx->num_vlans].id = vlan->vlan_id;
ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto;
ctx->num_vlans++;
return 0;
}
#if IS_ENABLED(CONFIG_MACSEC)
static const struct macsec_ops *vlan_get_macsec_ops(const struct macsec_context *ctx)
{
return vlan_dev_priv(ctx->netdev)->real_dev->macsec_ops;
}
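/* Every MACsec op below follows the same pattern: look up the real device's
* macsec_ops and forward the context, returning -EOPNOTSUPP when the lower
* device provides no offload.
*/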
static int vlan_macsec_offload(int (* const func)(struct macsec_context *),
struct macsec_context *ctx)
{
if (unlikely(!func))
return 0;
return (*func)(ctx);
}
static int vlan_macsec_dev_open(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_dev_open, ctx);
}
static int vlan_macsec_dev_stop(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_dev_stop, ctx);
}
static int vlan_macsec_add_secy(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_add_secy, ctx);
}
static int vlan_macsec_upd_secy(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_upd_secy, ctx);
}
static int vlan_macsec_del_secy(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_del_secy, ctx);
}
static int vlan_macsec_add_rxsc(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_add_rxsc, ctx);
}
static int vlan_macsec_upd_rxsc(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_upd_rxsc, ctx);
}
static int vlan_macsec_del_rxsc(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_del_rxsc, ctx);
}
static int vlan_macsec_add_rxsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_add_rxsa, ctx);
}
static int vlan_macsec_upd_rxsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_upd_rxsa, ctx);
}
static int vlan_macsec_del_rxsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_del_rxsa, ctx);
}
static int vlan_macsec_add_txsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_add_txsa, ctx);
}
static int vlan_macsec_upd_txsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_upd_txsa, ctx);
}
static int vlan_macsec_del_txsa(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_del_txsa, ctx);
}
static int vlan_macsec_get_dev_stats(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_get_dev_stats, ctx);
}
static int vlan_macsec_get_tx_sc_stats(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_get_tx_sc_stats, ctx);
}
static int vlan_macsec_get_tx_sa_stats(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_get_tx_sa_stats, ctx);
}
static int vlan_macsec_get_rx_sc_stats(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_get_rx_sc_stats, ctx);
}
static int vlan_macsec_get_rx_sa_stats(struct macsec_context *ctx)
{
const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);
if (!ops)
return -EOPNOTSUPP;
return vlan_macsec_offload(ops->mdo_get_rx_sa_stats, ctx);
}
static const struct macsec_ops macsec_offload_ops = {
/* Device wide */
.mdo_dev_open = vlan_macsec_dev_open,
.mdo_dev_stop = vlan_macsec_dev_stop,
/* SecY */
.mdo_add_secy = vlan_macsec_add_secy,
.mdo_upd_secy = vlan_macsec_upd_secy,
.mdo_del_secy = vlan_macsec_del_secy,
/* Security channels */
.mdo_add_rxsc = vlan_macsec_add_rxsc,
.mdo_upd_rxsc = vlan_macsec_upd_rxsc,
.mdo_del_rxsc = vlan_macsec_del_rxsc,
/* Security associations */
.mdo_add_rxsa = vlan_macsec_add_rxsa,
.mdo_upd_rxsa = vlan_macsec_upd_rxsa,
.mdo_del_rxsa = vlan_macsec_del_rxsa,
.mdo_add_txsa = vlan_macsec_add_txsa,
.mdo_upd_txsa = vlan_macsec_upd_txsa,
.mdo_del_txsa = vlan_macsec_del_txsa,
/* Statistics */
.mdo_get_dev_stats = vlan_macsec_get_dev_stats,
.mdo_get_tx_sc_stats = vlan_macsec_get_tx_sc_stats,
.mdo_get_tx_sa_stats = vlan_macsec_get_tx_sa_stats,
.mdo_get_rx_sc_stats = vlan_macsec_get_rx_sc_stats,
.mdo_get_rx_sa_stats = vlan_macsec_get_rx_sa_stats,
};
#endif
static const struct ethtool_ops vlan_ethtool_ops = {
.get_link_ksettings = vlan_ethtool_get_link_ksettings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = vlan_ethtool_get_ts_info,
};
static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_mtu = vlan_dev_change_mtu,
.ndo_init = vlan_dev_init,
.ndo_uninit = vlan_dev_uninit,
.ndo_open = vlan_dev_open,
.ndo_stop = vlan_dev_stop,
.ndo_start_xmit = vlan_dev_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_eth_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
.ndo_get_stats64 = vlan_dev_get_stats64,
#if IS_ENABLED(CONFIG_FCOE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
.ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vlan_dev_poll_controller,
.ndo_netpoll_setup = vlan_dev_netpoll_setup,
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
.ndo_get_iflink = vlan_dev_get_iflink,
.ndo_fill_forward_path = vlan_dev_fill_forward_path,
.ndo_hwtstamp_get = vlan_hwtstamp_get,
.ndo_hwtstamp_set = vlan_hwtstamp_set,
};
static void vlan_dev_free(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
/* Get rid of the vlan's reference to real_dev */
netdev_put(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
dev->priv_flags |= IFF_UNICAST_FLT;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
dev->netdev_ops = &vlan_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
#if IS_ENABLED(CONFIG_MACSEC)
dev->macsec_ops = &macsec_offload_ops;
#endif
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
eth_zero_addr(dev->broadcast);
}
| linux-master | net/8021q/vlan_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VLAN netlink control interface
*
* Copyright (c) 2007 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "vlan.h"
static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
[IFLA_VLAN_ID] = { .type = NLA_U16 },
[IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
};
static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
[IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
};
static inline int vlan_validate_qos_map(struct nlattr *attr)
{
if (!attr)
return 0;
return nla_validate_nested_deprecated(attr, IFLA_VLAN_QOS_MAX,
vlan_map_policy, NULL);
}
static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ifla_vlan_flags *flags;
u16 id;
int err;
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
return -EINVAL;
}
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
return -EADDRNOTAVAIL;
}
}
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "VLAN properties not specified");
return -EINVAL;
}
if (data[IFLA_VLAN_PROTOCOL]) {
switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN protocol");
return -EPROTONOSUPPORT;
}
}
if (data[IFLA_VLAN_ID]) {
id = nla_get_u16(data[IFLA_VLAN_ID]);
if (id >= VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN id");
return -ERANGE;
}
}
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
if ((flags->flags & flags->mask) &
~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
VLAN_FLAG_BRIDGE_BINDING)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN flags");
return -EINVAL;
}
}
err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
if (err < 0) {
NL_SET_ERR_MSG_MOD(extack, "Invalid ingress QOS map");
return err;
}
err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
if (err < 0) {
NL_SET_ERR_MSG_MOD(extack, "Invalid egress QOS map");
return err;
}
return 0;
}
static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
if (err)
return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
err = vlan_dev_set_egress_priority(dev, m->from, m->to);
if (err)
return err;
}
}
return 0;
}
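/* rtnl newlink handler: resolve the real device from IFLA_LINK, validate the
* protocol/VID pair against it, clamp the MTU, apply flag and QoS attributes
* via vlan_changelink() and finally register the VLAN device.
*/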
static int vlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
unsigned int max_mtu;
__be16 proto;
int err;
if (!data[IFLA_VLAN_ID]) {
NL_SET_ERR_MSG_MOD(extack, "VLAN id not specified");
return -EINVAL;
}
if (!tb[IFLA_LINK]) {
NL_SET_ERR_MSG_MOD(extack, "link not specified");
return -EINVAL;
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
}
if (data[IFLA_VLAN_PROTOCOL])
proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
else
proto = htons(ETH_P_8021Q);
vlan->vlan_proto = proto;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
vlan->real_dev = real_dev;
dev->priv_flags |= (real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
vlan->flags = VLAN_FLAG_REORDER_HDR;
err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id,
extack);
if (err < 0)
return err;
max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
real_dev->mtu;
if (!tb[IFLA_MTU])
dev->mtu = max_mtu;
else if (dev->mtu > max_mtu)
return -EINVAL;
/* Note: If this initial vlan_changelink() fails, we need
* to call vlan_dev_free_egress_priority() to free memory.
*/
err = vlan_changelink(dev, tb, data, extack);
if (!err)
err = register_vlan_dev(dev, extack);
if (err)
vlan_dev_free_egress_priority(dev);
return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)
{
if (n == 0)
return 0;
/* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
return nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
}
static size_t vlan_get_size(const struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
nla_total_size(2) + /* IFLA_VLAN_ID */
nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *pm;
struct ifla_vlan_flags f;
struct ifla_vlan_qos_mapping m;
struct nlattr *nest;
unsigned int i;
if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
goto nla_put_failure;
if (vlan->flags) {
f.flags = vlan->flags;
f.mask = ~0;
if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
goto nla_put_failure;
}
if (vlan->nr_ingress_mappings) {
nest = nla_nest_start_noflag(skb, IFLA_VLAN_INGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
if (!vlan->ingress_priority_map[i])
continue;
m.from = i;
m.to = vlan->ingress_priority_map[i];
if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
if (vlan->nr_egress_mappings) {
nest = nla_nest_start_noflag(skb, IFLA_VLAN_EGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
for (pm = vlan->egress_priority_map[i]; pm;
pm = pm->next) {
if (!pm->vlan_qos)
continue;
m.from = pm->priority;
m.to = (pm->vlan_qos >> 13) & 0x7;
if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m))
goto nla_put_failure;
}
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static struct net *vlan_get_link_net(const struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
return dev_net(real_dev);
}
struct rtnl_link_ops vlan_link_ops __read_mostly = {
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
.priv_size = sizeof(struct vlan_dev_priv),
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,
.changelink = vlan_changelink,
.dellink = unregister_vlan_dev,
.get_size = vlan_get_size,
.fill_info = vlan_fill_info,
.get_link_net = vlan_get_link_net,
};
int __init vlan_netlink_init(void)
{
return rtnl_link_register(&vlan_link_ops);
}
void __exit vlan_netlink_fini(void)
{
rtnl_link_unregister(&vlan_link_ops);
}
MODULE_ALIAS_RTNL_LINK("vlan");
| linux-master | net/8021q/vlan_netlink.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ceph/string_table.h>
static DEFINE_SPINLOCK(string_tree_lock);
static struct rb_root string_tree = RB_ROOT;
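/*
 * Intern a string in the global rbtree keyed by content.  Lookup is done
 * under the spinlock; if no live entry is found, a new node is allocated
 * outside the lock and insertion is retried, dropping any entry whose
 * refcount has already reached zero in the meantime.
 */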
struct ceph_string *ceph_find_or_create_string(const char* str, size_t len)
{
struct ceph_string *cs, *exist;
struct rb_node **p, *parent;
int ret;
exist = NULL;
spin_lock(&string_tree_lock);
p = &string_tree.rb_node;
while (*p) {
exist = rb_entry(*p, struct ceph_string, node);
ret = ceph_compare_string(exist, str, len);
if (ret > 0)
p = &(*p)->rb_left;
else if (ret < 0)
p = &(*p)->rb_right;
else
break;
exist = NULL;
}
if (exist && !kref_get_unless_zero(&exist->kref)) {
rb_erase(&exist->node, &string_tree);
RB_CLEAR_NODE(&exist->node);
exist = NULL;
}
spin_unlock(&string_tree_lock);
if (exist)
return exist;
cs = kmalloc(sizeof(*cs) + len + 1, GFP_NOFS);
if (!cs)
return NULL;
kref_init(&cs->kref);
cs->len = len;
memcpy(cs->str, str, len);
cs->str[len] = 0;
retry:
exist = NULL;
parent = NULL;
p = &string_tree.rb_node;
spin_lock(&string_tree_lock);
while (*p) {
parent = *p;
exist = rb_entry(*p, struct ceph_string, node);
ret = ceph_compare_string(exist, str, len);
if (ret > 0)
p = &(*p)->rb_left;
else if (ret < 0)
p = &(*p)->rb_right;
else
break;
exist = NULL;
}
ret = 0;
if (!exist) {
rb_link_node(&cs->node, parent, p);
rb_insert_color(&cs->node, &string_tree);
} else if (!kref_get_unless_zero(&exist->kref)) {
rb_erase(&exist->node, &string_tree);
RB_CLEAR_NODE(&exist->node);
ret = -EAGAIN;
}
spin_unlock(&string_tree_lock);
if (ret == -EAGAIN)
goto retry;
if (exist) {
kfree(cs);
cs = exist;
}
return cs;
}
EXPORT_SYMBOL(ceph_find_or_create_string);
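/*
 * kref release callback: unlink the string from the tree (unless a
 * concurrent lookup already did) and free it after an RCU grace period.
 */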
void ceph_release_string(struct kref *ref)
{
struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
spin_lock(&string_tree_lock);
if (!RB_EMPTY_NODE(&cs->node)) {
rb_erase(&cs->node, &string_tree);
RB_CLEAR_NODE(&cs->node);
}
spin_unlock(&string_tree_lock);
kfree_rcu(cs, rcu);
}
EXPORT_SYMBOL(ceph_release_string);
bool ceph_strings_empty(void)
{
return RB_EMPTY_ROOT(&string_tree);
}
| linux-master | net/ceph/string_table.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ceph/buffer.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h> /* for kvmalloc */
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
struct ceph_buffer *b;
b = kmalloc(sizeof(*b), gfp);
if (!b)
return NULL;
b->vec.iov_base = kvmalloc(len, gfp);
if (!b->vec.iov_base) {
kfree(b);
return NULL;
}
kref_init(&b->kref);
b->alloc_len = len;
b->vec.iov_len = len;
dout("buffer_new %p\n", b);
return b;
}
EXPORT_SYMBOL(ceph_buffer_new);
void ceph_buffer_release(struct kref *kref)
{
struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
dout("buffer_release %p\n", b);
kvfree(b->vec.iov_base);
kfree(b);
}
EXPORT_SYMBOL(ceph_buffer_release);
int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
{
size_t len;
ceph_decode_need(p, end, sizeof(u32), bad);
len = ceph_decode_32(p);
dout("decode_buffer len %d\n", (int)len);
ceph_decode_need(p, end, len, bad);
*b = ceph_buffer_new(len, GFP_NOFS);
if (!*b)
return -ENOMEM;
ceph_decode_copy(p, (*b)->vec.iov_base, len);
return 0;
bad:
return -EINVAL;
}
| linux-master | net/ceph/buffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ceph msgr2 protocol implementation
*
* Copyright (C) 2020 Ilya Dryomov <[email protected]>
*/
#include <linux/ceph/ceph_debug.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/sched/mm.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include "crypto.h" /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
#define FRAME_TAG_HELLO 1
#define FRAME_TAG_AUTH_REQUEST 2
#define FRAME_TAG_AUTH_BAD_METHOD 3
#define FRAME_TAG_AUTH_REPLY_MORE 4
#define FRAME_TAG_AUTH_REQUEST_MORE 5
#define FRAME_TAG_AUTH_DONE 6
#define FRAME_TAG_AUTH_SIGNATURE 7
#define FRAME_TAG_CLIENT_IDENT 8
#define FRAME_TAG_SERVER_IDENT 9
#define FRAME_TAG_IDENT_MISSING_FEATURES 10
#define FRAME_TAG_SESSION_RECONNECT 11
#define FRAME_TAG_SESSION_RESET 12
#define FRAME_TAG_SESSION_RETRY 13
#define FRAME_TAG_SESSION_RETRY_GLOBAL 14
#define FRAME_TAG_SESSION_RECONNECT_OK 15
#define FRAME_TAG_WAIT 16
#define FRAME_TAG_MESSAGE 17
#define FRAME_TAG_KEEPALIVE2 18
#define FRAME_TAG_KEEPALIVE2_ACK 19
#define FRAME_TAG_ACK 20
#define FRAME_LATE_STATUS_ABORTED 0x1
#define FRAME_LATE_STATUS_COMPLETE 0xe
#define FRAME_LATE_STATUS_ABORTED_MASK 0xf
#define IN_S_HANDLE_PREAMBLE 1
#define IN_S_HANDLE_CONTROL 2
#define IN_S_HANDLE_CONTROL_REMAINDER 3
#define IN_S_PREPARE_READ_DATA 4
#define IN_S_PREPARE_READ_DATA_CONT 5
#define IN_S_PREPARE_READ_ENC_PAGE 6
#define IN_S_PREPARE_SPARSE_DATA 7
#define IN_S_PREPARE_SPARSE_DATA_CONT 8
#define IN_S_HANDLE_EPILOGUE 9
#define IN_S_FINISH_SKIP 10
#define OUT_S_QUEUE_DATA 1
#define OUT_S_QUEUE_DATA_CONT 2
#define OUT_S_QUEUE_ENC_PAGE 3
#define OUT_S_QUEUE_ZEROS 4
#define OUT_S_FINISH_MESSAGE 5
#define OUT_S_GET_NEXT 6
#define CTRL_BODY(p) ((void *)(p) + CEPH_PREAMBLE_LEN)
#define FRONT_PAD(p) ((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
#define MIDDLE_PAD(p) (FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define DATA_PAD(p) (MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
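/*
 * Pull data into the caller's iterator until it is drained.  Returns 1
 * once the iterator is empty, 0 if no more data could be read for now
 * and a negative error otherwise; the iterator is advanced past
 * whatever was actually consumed.
 */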
static int do_recvmsg(struct socket *sock, struct iov_iter *it)
{
struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
int ret;
msg.msg_iter = *it;
while (iov_iter_count(it)) {
ret = sock_recvmsg(sock, &msg, msg.msg_flags);
if (ret <= 0) {
if (ret == -EAGAIN)
ret = 0;
return ret;
}
iov_iter_advance(it, ret);
}
WARN_ON(msg_data_left(&msg));
return 1;
}
/*
* Read as much as possible.
*
* Return:
* 1 - done, nothing (else) to read
* 0 - socket is empty, need to wait
* <0 - error
*/
static int ceph_tcp_recv(struct ceph_connection *con)
{
int ret;
dout("%s con %p %s %zu\n", __func__, con,
iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
iov_iter_count(&con->v2.in_iter));
ret = do_recvmsg(con->sock, &con->v2.in_iter);
dout("%s con %p ret %d left %zu\n", __func__, con, ret,
iov_iter_count(&con->v2.in_iter));
return ret;
}
static int do_sendmsg(struct socket *sock, struct iov_iter *it)
{
struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
int ret;
msg.msg_iter = *it;
while (iov_iter_count(it)) {
ret = sock_sendmsg(sock, &msg);
if (ret <= 0) {
if (ret == -EAGAIN)
ret = 0;
return ret;
}
iov_iter_advance(it, ret);
}
WARN_ON(msg_data_left(&msg));
return 1;
}
static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
{
struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
struct bio_vec bv;
int ret;
if (WARN_ON(!iov_iter_is_bvec(it)))
return -EINVAL;
while (iov_iter_count(it)) {
/* iov_iter_iovec() for ITER_BVEC */
bvec_set_page(&bv, it->bvec->bv_page,
min(iov_iter_count(it),
it->bvec->bv_len - it->iov_offset),
it->bvec->bv_offset + it->iov_offset);
/*
* MSG_SPLICE_PAGES cannot properly handle pages with
* page_count == 0, we need to fall back to sendmsg if
* that's the case.
*
* Same goes for slab pages: skb_can_coalesce() allows
* coalescing neighboring slab objects into a single frag
* which triggers one of hardened usercopy checks.
*/
if (sendpage_ok(bv.bv_page))
msg.msg_flags |= MSG_SPLICE_PAGES;
else
msg.msg_flags &= ~MSG_SPLICE_PAGES;
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
ret = sock_sendmsg(sock, &msg);
if (ret <= 0) {
if (ret == -EAGAIN)
ret = 0;
return ret;
}
iov_iter_advance(it, ret);
}
return 1;
}
/*
* Write as much as possible. The socket is expected to be corked,
* so we don't bother with MSG_MORE here.
*
* Return:
* 1 - done, nothing (else) to write
* 0 - socket is full, need to wait
* <0 - error
*/
static int ceph_tcp_send(struct ceph_connection *con)
{
int ret;
dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
if (con->v2.out_iter_sendpage)
ret = do_try_sendpage(con->sock, &con->v2.out_iter);
else
ret = do_sendmsg(con->sock, &con->v2.out_iter);
dout("%s con %p ret %d left %zu\n", __func__, con, ret,
iov_iter_count(&con->v2.out_iter));
return ret;
}
static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
{
BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
con->v2.in_kvec_cnt++;
con->v2.in_iter.nr_segs++;
con->v2.in_iter.count += len;
}
static void reset_in_kvecs(struct ceph_connection *con)
{
WARN_ON(iov_iter_count(&con->v2.in_iter));
con->v2.in_kvec_cnt = 0;
iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
}
static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
{
WARN_ON(iov_iter_count(&con->v2.in_iter));
con->v2.in_bvec = *bv;
iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
}
static void set_in_skip(struct ceph_connection *con, int len)
{
WARN_ON(iov_iter_count(&con->v2.in_iter));
dout("%s con %p len %d\n", __func__, con, len);
iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
}
static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
{
BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
WARN_ON(con->v2.out_zero);
con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
con->v2.out_kvec_cnt++;
con->v2.out_iter.nr_segs++;
con->v2.out_iter.count += len;
}
static void reset_out_kvecs(struct ceph_connection *con)
{
WARN_ON(iov_iter_count(&con->v2.out_iter));
WARN_ON(con->v2.out_zero);
con->v2.out_kvec_cnt = 0;
iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
con->v2.out_iter_sendpage = false;
}
static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
bool zerocopy)
{
WARN_ON(iov_iter_count(&con->v2.out_iter));
WARN_ON(con->v2.out_zero);
con->v2.out_bvec = *bv;
con->v2.out_iter_sendpage = zerocopy;
iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
con->v2.out_bvec.bv_len);
}
static void set_out_bvec_zero(struct ceph_connection *con)
{
WARN_ON(iov_iter_count(&con->v2.out_iter));
WARN_ON(!con->v2.out_zero);
bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
min(con->v2.out_zero, (int)PAGE_SIZE), 0);
con->v2.out_iter_sendpage = true;
iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
con->v2.out_bvec.bv_len);
}
static void out_zero_add(struct ceph_connection *con, int len)
{
dout("%s con %p len %d\n", __func__, con, len);
con->v2.out_zero += len;
}
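/*
 * Connection-lifetime buffer: kvmalloc'ed and remembered in conn_bufs[]
 * so that free_conn_bufs() can release everything in one go when the
 * connection is reset or torn down.
 */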
static void *alloc_conn_buf(struct ceph_connection *con, int len)
{
void *buf;
dout("%s con %p len %d\n", __func__, con, len);
if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
return NULL;
buf = kvmalloc(len, GFP_NOIO);
if (!buf)
return NULL;
con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
return buf;
}
static void free_conn_bufs(struct ceph_connection *con)
{
while (con->v2.conn_buf_cnt)
kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
}
static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
con->v2.in_sign_kvec_cnt++;
}
static void clear_in_sign_kvecs(struct ceph_connection *con)
{
con->v2.in_sign_kvec_cnt = 0;
}
static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
con->v2.out_sign_kvec_cnt++;
}
static void clear_out_sign_kvecs(struct ceph_connection *con)
{
con->v2.out_sign_kvec_cnt = 0;
}
static bool con_secure(struct ceph_connection *con)
{
return con->v2.con_mode == CEPH_CON_MODE_SECURE;
}
static int front_len(const struct ceph_msg *msg)
{
return le32_to_cpu(msg->hdr.front_len);
}
static int middle_len(const struct ceph_msg *msg)
{
return le32_to_cpu(msg->hdr.middle_len);
}
static int data_len(const struct ceph_msg *msg)
{
return le32_to_cpu(msg->hdr.data_len);
}
static bool need_padding(int len)
{
return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
}
static int padded_len(int len)
{
return ALIGN(len, CEPH_GCM_BLOCK_LEN);
}
static int padding_len(int len)
{
return padded_len(len) - len;
}
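/*
 * For example, with a 16-byte GCM block (CEPH_GCM_BLOCK_LEN is 16 at
 * the time of writing): padded_len(13) == 16, padding_len(13) == 3 and
 * padded_len(32) == 32, i.e. already-aligned lengths need no padding.
 */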
/* preamble + control segment */
static int head_onwire_len(int ctrl_len, bool secure)
{
int head_len;
int rem_len;
BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
if (secure) {
head_len = CEPH_PREAMBLE_SECURE_LEN;
if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
}
} else {
head_len = CEPH_PREAMBLE_PLAIN_LEN;
if (ctrl_len)
head_len += ctrl_len + CEPH_CRC_LEN;
}
return head_len;
}
/* front, middle and data segments + epilogue */
static int __tail_onwire_len(int front_len, int middle_len, int data_len,
bool secure)
{
BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
if (!front_len && !middle_len && !data_len)
return 0;
if (!secure)
return front_len + middle_len + data_len +
CEPH_EPILOGUE_PLAIN_LEN;
return padded_len(front_len) + padded_len(middle_len) +
padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
}
static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
{
return __tail_onwire_len(front_len(msg), middle_len(msg),
data_len(msg), secure);
}
/* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
#define MESSAGE_HEAD_PLAIN_LEN (CEPH_PREAMBLE_PLAIN_LEN + \
sizeof(struct ceph_msg_header2) + \
CEPH_CRC_LEN)
static const int frame_aligns[] = {
sizeof(void *),
sizeof(void *),
sizeof(void *),
PAGE_SIZE
};
/*
* Discards trailing empty segments, unless there is just one segment.
* A frame always has at least one (possibly empty) segment.
*/
static int calc_segment_count(const int *lens, int len_cnt)
{
int i;
for (i = len_cnt - 1; i >= 0; i--) {
if (lens[i])
return i + 1;
}
return 1;
}
static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
const int *lens, int len_cnt)
{
int i;
memset(desc, 0, sizeof(*desc));
desc->fd_tag = tag;
desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
for (i = 0; i < desc->fd_seg_cnt; i++) {
desc->fd_lens[i] = lens[i];
desc->fd_aligns[i] = frame_aligns[i];
}
}
/*
* Preamble crc covers everything up to itself (28 bytes) and
* is calculated and verified irrespective of the connection mode
* (i.e. even if the frame is encrypted).
*/
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
{
void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
void *start = p;
int i;
memset(p, 0, CEPH_PREAMBLE_LEN);
ceph_encode_8(&p, desc->fd_tag);
ceph_encode_8(&p, desc->fd_seg_cnt);
for (i = 0; i < desc->fd_seg_cnt; i++) {
ceph_encode_32(&p, desc->fd_lens[i]);
ceph_encode_16(&p, desc->fd_aligns[i]);
}
put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
}
static int decode_preamble(void *p, struct ceph_frame_desc *desc)
{
void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
u32 crc, expected_crc;
int i;
crc = crc32c(0, p, crcp - p);
expected_crc = get_unaligned_le32(crcp);
if (crc != expected_crc) {
pr_err("bad preamble crc, calculated %u, expected %u\n",
crc, expected_crc);
return -EBADMSG;
}
memset(desc, 0, sizeof(*desc));
desc->fd_tag = ceph_decode_8(&p);
desc->fd_seg_cnt = ceph_decode_8(&p);
if (desc->fd_seg_cnt < 1 ||
desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
pr_err("bad segment count %d\n", desc->fd_seg_cnt);
return -EINVAL;
}
for (i = 0; i < desc->fd_seg_cnt; i++) {
desc->fd_lens[i] = ceph_decode_32(&p);
desc->fd_aligns[i] = ceph_decode_16(&p);
}
if (desc->fd_lens[0] < 0 ||
desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
pr_err("bad control segment length %d\n", desc->fd_lens[0]);
return -EINVAL;
}
if (desc->fd_lens[1] < 0 ||
desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
pr_err("bad front segment length %d\n", desc->fd_lens[1]);
return -EINVAL;
}
if (desc->fd_lens[2] < 0 ||
desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
return -EINVAL;
}
if (desc->fd_lens[3] < 0 ||
desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
pr_err("bad data segment length %d\n", desc->fd_lens[3]);
return -EINVAL;
}
/*
* This would fire for FRAME_TAG_WAIT (it has one empty
* segment), but we should never get it as client.
*/
if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
pr_err("last segment empty, segment count %d\n",
desc->fd_seg_cnt);
return -EINVAL;
}
return 0;
}
static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
{
con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
FRAME_LATE_STATUS_COMPLETE;
cpu_to_le32s(&con->v2.out_epil.front_crc);
cpu_to_le32s(&con->v2.out_epil.middle_crc);
cpu_to_le32s(&con->v2.out_epil.data_crc);
}
static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
{
memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
FRAME_LATE_STATUS_COMPLETE;
}
static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
u32 *data_crc)
{
u8 late_status;
late_status = ceph_decode_8(&p);
if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
FRAME_LATE_STATUS_COMPLETE) {
/* we should never get an aborted message as client */
pr_err("bad late_status 0x%x\n", late_status);
return -EINVAL;
}
if (front_crc && middle_crc && data_crc) {
*front_crc = ceph_decode_32(&p);
*middle_crc = ceph_decode_32(&p);
*data_crc = ceph_decode_32(&p);
}
return 0;
}
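/*
 * Rebuild the legacy ceph_msg_header consumed by the upper layers from
 * the on-wire msgr2 header2 plus the separately supplied segment
 * lengths; hdr->src comes from the peer's entity name since header2 no
 * longer carries it.
 */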
static void fill_header(struct ceph_msg_header *hdr,
const struct ceph_msg_header2 *hdr2,
int front_len, int middle_len, int data_len,
const struct ceph_entity_name *peer_name)
{
hdr->seq = hdr2->seq;
hdr->tid = hdr2->tid;
hdr->type = hdr2->type;
hdr->priority = hdr2->priority;
hdr->version = hdr2->version;
hdr->front_len = cpu_to_le32(front_len);
hdr->middle_len = cpu_to_le32(middle_len);
hdr->data_len = cpu_to_le32(data_len);
hdr->data_off = hdr2->data_off;
hdr->src = *peer_name;
hdr->compat_version = hdr2->compat_version;
hdr->reserved = 0;
hdr->crc = 0;
}
static void fill_header2(struct ceph_msg_header2 *hdr2,
const struct ceph_msg_header *hdr, u64 ack_seq)
{
hdr2->seq = hdr->seq;
hdr2->tid = hdr->tid;
hdr2->type = hdr->type;
hdr2->priority = hdr->priority;
hdr2->version = hdr->version;
hdr2->data_pre_padding_len = 0;
hdr2->data_off = hdr->data_off;
hdr2->ack_seq = cpu_to_le64(ack_seq);
hdr2->flags = 0;
hdr2->compat_version = hdr->compat_version;
hdr2->reserved = 0;
}
static int verify_control_crc(struct ceph_connection *con)
{
int ctrl_len = con->v2.in_desc.fd_lens[0];
u32 crc, expected_crc;
WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
if (crc != expected_crc) {
pr_err("bad control crc, calculated %u, expected %u\n",
crc, expected_crc);
return -EBADMSG;
}
return 0;
}
static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
u32 middle_crc, u32 data_crc)
{
if (front_len(con->in_msg)) {
con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
front_len(con->in_msg));
} else {
WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
con->in_front_crc = -1;
}
if (middle_len(con->in_msg))
con->in_middle_crc = crc32c(-1,
con->in_msg->middle->vec.iov_base,
middle_len(con->in_msg));
else if (data_len(con->in_msg))
con->in_middle_crc = -1;
else
con->in_middle_crc = 0;
if (!data_len(con->in_msg))
con->in_data_crc = 0;
dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
con->in_front_crc, con->in_middle_crc, con->in_data_crc);
if (con->in_front_crc != front_crc) {
pr_err("bad front crc, calculated %u, expected %u\n",
con->in_front_crc, front_crc);
return -EBADMSG;
}
if (con->in_middle_crc != middle_crc) {
pr_err("bad middle crc, calculated %u, expected %u\n",
con->in_middle_crc, middle_crc);
return -EBADMSG;
}
if (con->in_data_crc != data_crc) {
pr_err("bad data crc, calculated %u, expected %u\n",
con->in_data_crc, data_crc);
return -EBADMSG;
}
return 0;
}
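/*
 * Set up the crypto state negotiated during authentication: an
 * HMAC-SHA256 tfm keyed with the session key for auth signatures and,
 * in secure mode, an AES-GCM AEAD keyed from con_secret with separate
 * rx/tx nonces taken from the remainder of con_secret.
 */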
static int setup_crypto(struct ceph_connection *con,
const u8 *session_key, int session_key_len,
const u8 *con_secret, int con_secret_len)
{
unsigned int noio_flag;
int ret;
dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
__func__, con, con->v2.con_mode, session_key_len, con_secret_len);
WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
con->v2.con_mode != CEPH_CON_MODE_SECURE) {
pr_err("bad con_mode %d\n", con->v2.con_mode);
return -EINVAL;
}
if (!session_key_len) {
WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
WARN_ON(con_secret_len);
return 0; /* auth_none */
}
noio_flag = memalloc_noio_save();
con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(con->v2.hmac_tfm)) {
ret = PTR_ERR(con->v2.hmac_tfm);
con->v2.hmac_tfm = NULL;
pr_err("failed to allocate hmac tfm context: %d\n", ret);
return ret;
}
WARN_ON((unsigned long)session_key &
crypto_shash_alignmask(con->v2.hmac_tfm));
ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
session_key_len);
if (ret) {
pr_err("failed to set hmac key: %d\n", ret);
return ret;
}
if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
WARN_ON(con_secret_len);
return 0; /* auth_x, plain mode */
}
if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
pr_err("con_secret too small %d\n", con_secret_len);
return -EINVAL;
}
noio_flag = memalloc_noio_save();
con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(con->v2.gcm_tfm)) {
ret = PTR_ERR(con->v2.gcm_tfm);
con->v2.gcm_tfm = NULL;
pr_err("failed to allocate gcm tfm context: %d\n", ret);
return ret;
}
WARN_ON((unsigned long)con_secret &
crypto_aead_alignmask(con->v2.gcm_tfm));
ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
if (ret) {
pr_err("failed to set gcm key: %d\n", ret);
return ret;
}
WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
if (ret) {
pr_err("failed to set gcm tag size: %d\n", ret);
return ret;
}
con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
if (!con->v2.gcm_req) {
pr_err("failed to allocate gcm request\n");
return -ENOMEM;
}
crypto_init_wait(&con->v2.gcm_wait);
aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &con->v2.gcm_wait);
memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
CEPH_GCM_IV_LEN);
memcpy(&con->v2.out_gcm_nonce,
con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
CEPH_GCM_IV_LEN);
return 0; /* auth_x, secure mode */
}
static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
int kvec_cnt, u8 *hmac)
{
SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */
int ret;
int i;
dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
con->v2.hmac_tfm, kvec_cnt);
if (!con->v2.hmac_tfm) {
memset(hmac, 0, SHA256_DIGEST_SIZE);
return 0; /* auth_none */
}
desc->tfm = con->v2.hmac_tfm;
ret = crypto_shash_init(desc);
if (ret)
goto out;
for (i = 0; i < kvec_cnt; i++) {
WARN_ON((unsigned long)kvecs[i].iov_base &
crypto_shash_alignmask(con->v2.hmac_tfm));
ret = crypto_shash_update(desc, kvecs[i].iov_base,
kvecs[i].iov_len);
if (ret)
goto out;
}
ret = crypto_shash_final(desc, hmac);
out:
shash_desc_zero(desc);
return ret; /* auth_x, both plain and secure modes */
}
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
{
u64 counter;
counter = le64_to_cpu(nonce->counter);
nonce->counter = cpu_to_le64(counter + 1);
}
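/*
 * One-shot AEAD encrypt or decrypt over the given scatterlists using
 * the direction-specific nonce, which is advanced only after a
 * successful operation.
 */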
static int gcm_crypt(struct ceph_connection *con, bool encrypt,
struct scatterlist *src, struct scatterlist *dst,
int src_len)
{
struct ceph_gcm_nonce *nonce;
int ret;
nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
aead_request_set_ad(con->v2.gcm_req, 0); /* no AAD */
aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
crypto_aead_decrypt(con->v2.gcm_req),
&con->v2.gcm_wait);
if (ret)
return ret;
gcm_inc_nonce(nonce);
return 0;
}
static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
struct bio_vec *bv)
{
struct page *page;
size_t off, len;
WARN_ON(!cursor->total_resid);
/* skip zero-length data items */
while (!cursor->resid)
ceph_msg_data_advance(cursor, 0);
/* get a piece of data, cursor isn't advanced */
page = ceph_msg_data_next(cursor, &off, &len);
bvec_set_page(bv, page, len, off);
}
static int calc_sg_cnt(void *buf, int buf_len)
{
int sg_cnt;
if (!buf_len)
return 0;
sg_cnt = need_padding(buf_len) ? 1 : 0;
if (is_vmalloc_addr(buf)) {
WARN_ON(offset_in_page(buf));
sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
} else {
sg_cnt++;
}
return sg_cnt;
}
static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
{
int data_len = cursor->total_resid;
struct bio_vec bv;
int sg_cnt;
if (!data_len)
return 0;
sg_cnt = need_padding(data_len) ? 1 : 0;
do {
get_bvec_at(cursor, &bv);
sg_cnt++;
ceph_msg_data_advance(cursor, bv.bv_len);
} while (cursor->total_resid);
return sg_cnt;
}
static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
{
void *end = buf + buf_len;
struct page *page;
int len;
void *p;
if (!buf_len)
return;
if (is_vmalloc_addr(buf)) {
p = buf;
do {
page = vmalloc_to_page(p);
len = min_t(int, end - p, PAGE_SIZE);
WARN_ON(!page || !len || offset_in_page(p));
sg_set_page(*sg, page, len, 0);
*sg = sg_next(*sg);
p += len;
} while (p != end);
} else {
sg_set_buf(*sg, buf, buf_len);
*sg = sg_next(*sg);
}
if (need_padding(buf_len)) {
sg_set_buf(*sg, pad, padding_len(buf_len));
*sg = sg_next(*sg);
}
}
static void init_sgs_cursor(struct scatterlist **sg,
struct ceph_msg_data_cursor *cursor, u8 *pad)
{
int data_len = cursor->total_resid;
struct bio_vec bv;
if (!data_len)
return;
do {
get_bvec_at(cursor, &bv);
sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
*sg = sg_next(*sg);
ceph_msg_data_advance(cursor, bv.bv_len);
} while (cursor->total_resid);
if (need_padding(data_len)) {
sg_set_buf(*sg, pad, padding_len(data_len));
*sg = sg_next(*sg);
}
}
/**
* init_sgs_pages: set up scatterlist on an array of page pointers
* @sg: scatterlist to populate
* @pages: pointer to page array
* @dpos: position in the array to start (bytes)
* @dlen: len to add to sg (bytes)
* @pad: pointer to pad destination (if any)
*
* Populate the scatterlist from the page array, starting at an arbitrary
* byte in the array and running for a specified length.
*/
static void init_sgs_pages(struct scatterlist **sg, struct page **pages,
int dpos, int dlen, u8 *pad)
{
int idx = dpos >> PAGE_SHIFT;
int off = offset_in_page(dpos);
int resid = dlen;
do {
int len = min(resid, (int)PAGE_SIZE - off);
sg_set_page(*sg, pages[idx], len, off);
*sg = sg_next(*sg);
off = 0;
++idx;
resid -= len;
} while (resid);
if (need_padding(dlen)) {
sg_set_buf(*sg, pad, padding_len(dlen));
*sg = sg_next(*sg);
}
}
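/*
 * Build a scatterlist describing the message tail as laid out on the
 * wire in secure mode: front, middle and data (taken either from the
 * data cursor or from a page array starting at dpos), each padded out
 * to the GCM block size where needed, with the epilogue (and optionally
 * room for the auth tag) as the final entry.
 */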
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
u8 *front_pad, u8 *middle_pad, u8 *data_pad,
void *epilogue, struct page **pages, int dpos,
bool add_tag)
{
struct ceph_msg_data_cursor cursor;
struct scatterlist *cur_sg;
int dlen = data_len(msg);
int sg_cnt;
int ret;
if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
return 0;
sg_cnt = 1; /* epilogue + [auth tag] */
if (front_len(msg))
sg_cnt += calc_sg_cnt(msg->front.iov_base,
front_len(msg));
if (middle_len(msg))
sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
middle_len(msg));
if (dlen) {
if (pages) {
sg_cnt += calc_pages_for(dpos, dlen);
if (need_padding(dlen))
sg_cnt++;
} else {
ceph_msg_data_cursor_init(&cursor, msg, dlen);
sg_cnt += calc_sg_cnt_cursor(&cursor);
}
}
ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
if (ret)
return ret;
cur_sg = sgt->sgl;
if (front_len(msg))
init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
front_pad);
if (middle_len(msg))
init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
middle_pad);
if (dlen) {
if (pages) {
init_sgs_pages(&cur_sg, pages, dpos, dlen, data_pad);
} else {
ceph_msg_data_cursor_init(&cursor, msg, dlen);
init_sgs_cursor(&cur_sg, &cursor, data_pad);
}
}
WARN_ON(!sg_is_last(cur_sg));
sg_set_buf(cur_sg, epilogue,
CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
return 0;
}
static int decrypt_preamble(struct ceph_connection *con)
{
struct scatterlist sg;
sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
}
static int decrypt_control_remainder(struct ceph_connection *con)
{
int ctrl_len = con->v2.in_desc.fd_lens[0];
int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
struct scatterlist sgs[2];
WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
sg_init_table(sgs, 2);
sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
return gcm_crypt(con, false, sgs, sgs,
padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}
/* Process sparse read data that lives in a buffer */
static int process_v2_sparse_read(struct ceph_connection *con,
struct page **pages, int spos)
{
struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
int ret;
for (;;) {
char *buf = NULL;
ret = con->ops->sparse_read(con, cursor, &buf);
if (ret <= 0)
return ret;
dout("%s: sparse_read return %x buf %p\n", __func__, ret, buf);
do {
int idx = spos >> PAGE_SHIFT;
int soff = offset_in_page(spos);
struct page *spage = con->v2.in_enc_pages[idx];
int len = min_t(int, ret, PAGE_SIZE - soff);
if (buf) {
memcpy_from_page(buf, spage, soff, len);
buf += len;
} else {
struct bio_vec bv;
get_bvec_at(cursor, &bv);
len = min_t(int, len, bv.bv_len);
memcpy_page(bv.bv_page, bv.bv_offset,
spage, soff, len);
ceph_msg_data_advance(cursor, len);
}
spos += len;
ret -= len;
} while (ret);
}
}
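/*
 * Decrypt the message tail in one pass: the ciphertext accumulated in
 * in_enc_pages is the source, the message front/middle buffers plus the
 * pad and epilogue scratch areas in in_buf are the destination.  For
 * sparse reads the data segment is decrypted in place in the page
 * vector and then unpacked by process_v2_sparse_read().
 */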
static int decrypt_tail(struct ceph_connection *con)
{
struct sg_table enc_sgt = {};
struct sg_table sgt = {};
struct page **pages = NULL;
bool sparse = con->in_msg->sparse_read;
int dpos = 0;
int tail_len;
int ret;
tail_len = tail_onwire_len(con->in_msg, true);
ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
con->v2.in_enc_page_cnt, 0, tail_len,
GFP_NOIO);
if (ret)
goto out;
if (sparse) {
dpos = padded_len(front_len(con->in_msg) + padded_len(middle_len(con->in_msg)));
pages = con->v2.in_enc_pages;
}
ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
con->v2.in_buf, pages, dpos, true);
if (ret)
goto out;
dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
if (ret)
goto out;
if (sparse && data_len(con->in_msg)) {
ret = process_v2_sparse_read(con, con->v2.in_enc_pages, dpos);
if (ret)
goto out;
}
WARN_ON(!con->v2.in_enc_page_cnt);
ceph_release_page_vector(con->v2.in_enc_pages,
con->v2.in_enc_page_cnt);
con->v2.in_enc_pages = NULL;
con->v2.in_enc_page_cnt = 0;
out:
sg_free_table(&sgt);
sg_free_table(&enc_sgt);
return ret;
}
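/*
 * Queue the msgr2 banner: the fixed prefix followed by a 16-bit payload
 * length and the two 64-bit feature bitmasks (supported and required).
 * The banner is also added to the out sign kvecs as it is covered by
 * the auth signature exchange.
 */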
static int prepare_banner(struct ceph_connection *con)
{
int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
void *buf, *p;
buf = alloc_conn_buf(con, buf_len);
if (!buf)
return -ENOMEM;
p = buf;
ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
WARN_ON(p != buf + buf_len);
add_out_kvec(con, buf, buf_len);
add_out_sign_kvec(con, buf, buf_len);
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
return 0;
}
/*
* base:
* preamble
* control body (ctrl_len bytes)
* space for control crc
*
* extdata (optional):
* control body (extdata_len bytes)
*
* Compute control crc and gather base and extdata into:
*
* preamble
* control body (ctrl_len + extdata_len bytes)
* control crc
*
* Preamble should already be encoded at the start of base.
*/
static void prepare_head_plain(struct ceph_connection *con, void *base,
int ctrl_len, void *extdata, int extdata_len,
bool to_be_signed)
{
int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
void *crcp = base + base_len - CEPH_CRC_LEN;
u32 crc;
crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
if (extdata_len)
crc = crc32c(crc, extdata, extdata_len);
put_unaligned_le32(crc, crcp);
if (!extdata_len) {
add_out_kvec(con, base, base_len);
if (to_be_signed)
add_out_sign_kvec(con, base, base_len);
return;
}
add_out_kvec(con, base, crcp - base);
add_out_kvec(con, extdata, extdata_len);
add_out_kvec(con, crcp, CEPH_CRC_LEN);
if (to_be_signed) {
add_out_sign_kvec(con, base, crcp - base);
add_out_sign_kvec(con, extdata, extdata_len);
add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
}
}
static int prepare_head_secure_small(struct ceph_connection *con,
void *base, int ctrl_len)
{
struct scatterlist sg;
int ret;
/* inline buffer padding? */
if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
memset(CTRL_BODY(base) + ctrl_len, 0,
CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
ret = gcm_crypt(con, true, &sg, &sg,
CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
if (ret)
return ret;
add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
return 0;
}
/*
* base:
* preamble
* control body (ctrl_len bytes)
* space for padding, if needed
* space for control remainder auth tag
* space for preamble auth tag
*
* Encrypt preamble and the inline portion, then encrypt the remainder
* and gather into:
*
* preamble
* control body (48 bytes)
* preamble auth tag
* control body (ctrl_len - 48 bytes)
* zero padding, if needed
* control remainder auth tag
*
* Preamble should already be encoded at the start of base.
*/
static int prepare_head_secure_big(struct ceph_connection *con,
void *base, int ctrl_len)
{
int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
void *rem_tag = rem + padded_len(rem_len);
void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
struct scatterlist sgs[2];
int ret;
sg_init_table(sgs, 2);
sg_set_buf(&sgs[0], base, rem - base);
sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
ret = gcm_crypt(con, true, sgs, sgs, rem - base);
if (ret)
return ret;
/* control remainder padding? */
if (need_padding(rem_len))
memset(rem + rem_len, 0, padding_len(rem_len));
sg_init_one(&sgs[0], rem, pmbl_tag - rem);
ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
if (ret)
return ret;
add_out_kvec(con, base, rem - base);
add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
add_out_kvec(con, rem, pmbl_tag - rem);
return 0;
}
static int __prepare_control(struct ceph_connection *con, int tag,
void *base, int ctrl_len, void *extdata,
int extdata_len, bool to_be_signed)
{
int total_len = ctrl_len + extdata_len;
struct ceph_frame_desc desc;
int ret;
dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
total_len, ctrl_len, extdata_len);
/* extdata may be vmalloc'ed but not base */
if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
return -EINVAL;
init_frame_desc(&desc, tag, &total_len, 1);
encode_preamble(&desc, base);
if (con_secure(con)) {
if (WARN_ON(extdata_len || to_be_signed))
return -EINVAL;
if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
/* fully inlined, inline buffer may need padding */
ret = prepare_head_secure_small(con, base, ctrl_len);
else
/* partially inlined, inline buffer is full */
ret = prepare_head_secure_big(con, base, ctrl_len);
if (ret)
return ret;
} else {
prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
to_be_signed);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
return 0;
}
static int prepare_control(struct ceph_connection *con, int tag,
void *base, int ctrl_len)
{
return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
}
static int prepare_hello(struct ceph_connection *con)
{
void *buf, *p;
int ctrl_len;
ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
if (!buf)
return -ENOMEM;
p = CTRL_BODY(buf);
ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
ceph_encode_entity_addr(&p, &con->peer_addr);
WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
NULL, 0, true);
}
/* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
#define AUTH_BUF_LEN (512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
static int prepare_auth_request(struct ceph_connection *con)
{
void *authorizer, *authorizer_copy;
int ctrl_len, authorizer_len;
void *buf;
int ret;
ctrl_len = AUTH_BUF_LEN;
buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
if (!buf)
return -ENOMEM;
mutex_unlock(&con->mutex);
ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
&authorizer, &authorizer_len);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V2_HELLO) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
return -EAGAIN;
}
dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
if (ret)
return ret;
authorizer_copy = alloc_conn_buf(con, authorizer_len);
if (!authorizer_copy)
return -ENOMEM;
memcpy(authorizer_copy, authorizer, authorizer_len);
return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
authorizer_copy, authorizer_len, true);
}
static int prepare_auth_request_more(struct ceph_connection *con,
void *reply, int reply_len)
{
int ctrl_len, authorizer_len;
void *authorizer;
void *buf;
int ret;
ctrl_len = AUTH_BUF_LEN;
buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
if (!buf)
return -ENOMEM;
mutex_unlock(&con->mutex);
ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
CTRL_BODY(buf), &ctrl_len,
&authorizer, &authorizer_len);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V2_AUTH) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
return -EAGAIN;
}
dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
if (ret)
return ret;
return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
ctrl_len, authorizer, authorizer_len, true);
}
static int prepare_auth_signature(struct ceph_connection *con)
{
void *buf;
int ret;
buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
con_secure(con)));
if (!buf)
return -ENOMEM;
ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
CTRL_BODY(buf));
if (ret)
return ret;
return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
SHA256_DIGEST_SIZE);
}
static int prepare_client_ident(struct ceph_connection *con)
{
struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
struct ceph_client *client = from_msgr(con->msgr);
u64 global_id = ceph_client_gid(client);
void *buf, *p;
int ctrl_len;
WARN_ON(con->v2.server_cookie);
WARN_ON(con->v2.connect_seq);
WARN_ON(con->v2.peer_global_seq);
if (!con->v2.client_cookie) {
do {
get_random_bytes(&con->v2.client_cookie,
sizeof(con->v2.client_cookie));
} while (!con->v2.client_cookie);
dout("%s con %p generated cookie 0x%llx\n", __func__, con,
con->v2.client_cookie);
} else {
dout("%s con %p cookie already set 0x%llx\n", __func__, con,
con->v2.client_cookie);
}
dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
__func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
global_id, con->v2.global_seq, client->supported_features,
client->required_features, con->v2.client_cookie);
ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
if (!buf)
return -ENOMEM;
p = CTRL_BODY(buf);
ceph_encode_8(&p, 2); /* addrvec marker */
ceph_encode_32(&p, 1); /* addr_cnt */
ceph_encode_entity_addr(&p, my_addr);
ceph_encode_entity_addr(&p, &con->peer_addr);
ceph_encode_64(&p, global_id);
ceph_encode_64(&p, con->v2.global_seq);
ceph_encode_64(&p, client->supported_features);
ceph_encode_64(&p, client->required_features);
ceph_encode_64(&p, 0); /* flags */
ceph_encode_64(&p, con->v2.client_cookie);
WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
}
static int prepare_session_reconnect(struct ceph_connection *con)
{
struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
void *buf, *p;
int ctrl_len;
WARN_ON(!con->v2.client_cookie);
WARN_ON(!con->v2.server_cookie);
WARN_ON(!con->v2.connect_seq);
WARN_ON(!con->v2.peer_global_seq);
dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
__func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
con->v2.connect_seq, con->in_seq);
ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
if (!buf)
return -ENOMEM;
p = CTRL_BODY(buf);
ceph_encode_8(&p, 2); /* entity_addrvec_t marker */
ceph_encode_32(&p, 1); /* my_addrs len */
ceph_encode_entity_addr(&p, my_addr);
ceph_encode_64(&p, con->v2.client_cookie);
ceph_encode_64(&p, con->v2.server_cookie);
ceph_encode_64(&p, con->v2.global_seq);
ceph_encode_64(&p, con->v2.connect_seq);
ceph_encode_64(&p, con->in_seq);
WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
}
static int prepare_keepalive2(struct ceph_connection *con)
{
struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
struct timespec64 now;
ktime_get_real_ts64(&now);
dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
now.tv_nsec);
ceph_encode_timespec64(ts, &now);
reset_out_kvecs(con);
return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
sizeof(struct ceph_timespec));
}
static int prepare_ack(struct ceph_connection *con)
{
void *p;
dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
p = CTRL_BODY(con->v2.out_buf);
ceph_encode_64(&p, con->in_seq_acked);
reset_out_kvecs(con);
return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}
static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
{
dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
con->out_msg, aborted, con->v2.out_epil.front_crc,
con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
encode_epilogue_plain(con, aborted);
add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
}
/*
* For "used" empty segments, crc is -1. For unused (trailing)
* segments, crc is 0.
*/
static void prepare_message_plain(struct ceph_connection *con)
{
struct ceph_msg *msg = con->out_msg;
prepare_head_plain(con, con->v2.out_buf,
sizeof(struct ceph_msg_header2), NULL, 0, false);
if (!front_len(msg) && !middle_len(msg)) {
if (!data_len(msg)) {
/*
* Empty message: once the head is written,
* we are done -- there is no epilogue.
*/
con->v2.out_state = OUT_S_FINISH_MESSAGE;
return;
}
con->v2.out_epil.front_crc = -1;
con->v2.out_epil.middle_crc = -1;
con->v2.out_state = OUT_S_QUEUE_DATA;
return;
}
if (front_len(msg)) {
con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
front_len(msg));
add_out_kvec(con, msg->front.iov_base, front_len(msg));
} else {
/* middle (at least) is there, checked above */
con->v2.out_epil.front_crc = -1;
}
if (middle_len(msg)) {
con->v2.out_epil.middle_crc =
crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
} else {
con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
}
if (data_len(msg)) {
con->v2.out_state = OUT_S_QUEUE_DATA;
} else {
con->v2.out_epil.data_crc = 0;
prepare_epilogue_plain(con, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
}
/*
* Unfortunately the kernel crypto API doesn't support streaming
* (piecewise) operation for AEAD algorithms, so we can't get away
* with a fixed size buffer and a couple sgs. Instead, we have to
* allocate pages for the entire tail of the message (currently up
* to ~32M) and two sgs arrays (up to ~256K each)...
*/
static int prepare_message_secure(struct ceph_connection *con)
{
void *zerop = page_address(ceph_zero_page);
struct sg_table enc_sgt = {};
struct sg_table sgt = {};
struct page **enc_pages;
int enc_page_cnt;
int tail_len;
int ret;
ret = prepare_head_secure_small(con, con->v2.out_buf,
sizeof(struct ceph_msg_header2));
if (ret)
return ret;
tail_len = tail_onwire_len(con->out_msg, true);
if (!tail_len) {
/*
* Empty message: once the head is written,
* we are done -- there is no epilogue.
*/
con->v2.out_state = OUT_S_FINISH_MESSAGE;
return 0;
}
encode_epilogue_secure(con, false);
ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
&con->v2.out_epil, NULL, 0, false);
if (ret)
goto out;
enc_page_cnt = calc_pages_for(0, tail_len);
enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
if (IS_ERR(enc_pages)) {
ret = PTR_ERR(enc_pages);
goto out;
}
WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
con->v2.out_enc_pages = enc_pages;
con->v2.out_enc_page_cnt = enc_page_cnt;
con->v2.out_enc_resid = tail_len;
con->v2.out_enc_i = 0;
ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
0, tail_len, GFP_NOIO);
if (ret)
goto out;
ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
tail_len - CEPH_GCM_TAG_LEN);
if (ret)
goto out;
dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
con->out_msg, sgt.orig_nents, enc_page_cnt);
con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
out:
sg_free_table(&sgt);
sg_free_table(&enc_sgt);
return ret;
}
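/*
 * Queue con->out_msg: encode the frame preamble and ceph_msg_header2
 * (piggybacking an ack for everything received so far) and hand off to
 * the plain or secure tail preparation.
 */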
static int prepare_message(struct ceph_connection *con)
{
int lens[] = {
sizeof(struct ceph_msg_header2),
front_len(con->out_msg),
middle_len(con->out_msg),
data_len(con->out_msg)
};
struct ceph_frame_desc desc;
int ret;
dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
con->out_msg, lens[0], lens[1], lens[2], lens[3]);
if (con->in_seq > con->in_seq_acked) {
dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
}
reset_out_kvecs(con);
init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
encode_preamble(&desc, con->v2.out_buf);
fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
con->in_seq_acked);
if (con_secure(con)) {
ret = prepare_message_secure(con);
if (ret)
return ret;
} else {
prepare_message_plain(con);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
return 0;
}
static int prepare_read_banner_prefix(struct ceph_connection *con)
{
void *buf;
buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
if (!buf)
return -ENOMEM;
reset_in_kvecs(con);
add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
con->state = CEPH_CON_S_V2_BANNER_PREFIX;
return 0;
}
static int prepare_read_banner_payload(struct ceph_connection *con,
int payload_len)
{
void *buf;
buf = alloc_conn_buf(con, payload_len);
if (!buf)
return -ENOMEM;
reset_in_kvecs(con);
add_in_kvec(con, buf, payload_len);
add_in_sign_kvec(con, buf, payload_len);
con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
return 0;
}
static void prepare_read_preamble(struct ceph_connection *con)
{
reset_in_kvecs(con);
add_in_kvec(con, con->v2.in_buf,
con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
CEPH_PREAMBLE_PLAIN_LEN);
con->v2.in_state = IN_S_HANDLE_PREAMBLE;
}
static int prepare_read_control(struct ceph_connection *con)
{
int ctrl_len = con->v2.in_desc.fd_lens[0];
int head_len;
void *buf;
reset_in_kvecs(con);
if (con->state == CEPH_CON_S_V2_HELLO ||
con->state == CEPH_CON_S_V2_AUTH) {
head_len = head_onwire_len(ctrl_len, false);
buf = alloc_conn_buf(con, head_len);
if (!buf)
return -ENOMEM;
/* preserve preamble */
memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
add_in_sign_kvec(con, buf, head_len);
} else {
if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
buf = alloc_conn_buf(con, ctrl_len);
if (!buf)
return -ENOMEM;
add_in_kvec(con, buf, ctrl_len);
} else {
add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
}
add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
}
con->v2.in_state = IN_S_HANDLE_CONTROL;
return 0;
}
static int prepare_read_control_remainder(struct ceph_connection *con)
{
int ctrl_len = con->v2.in_desc.fd_lens[0];
int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
void *buf;
buf = alloc_conn_buf(con, ctrl_len);
if (!buf)
return -ENOMEM;
memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
reset_in_kvecs(con);
add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
add_in_kvec(con, con->v2.in_buf,
padding_len(rem_len) + CEPH_GCM_TAG_LEN);
con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
return 0;
}
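/*
 * Start receiving the data segment of a plain-mode message one bio_vec
 * at a time.  With the rxbounce option each piece is first read into a
 * private bounce page and only copied to its destination afterwards, so
 * the CRC is computed over the exact bytes that arrived off the wire
 * even if the destination page is being modified concurrently.
 */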
static int prepare_read_data(struct ceph_connection *con)
{
struct bio_vec bv;
con->in_data_crc = -1;
ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
data_len(con->in_msg));
get_bvec_at(&con->v2.in_cursor, &bv);
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
if (unlikely(!con->bounce_page)) {
con->bounce_page = alloc_page(GFP_NOIO);
if (!con->bounce_page) {
pr_err("failed to allocate bounce page\n");
return -ENOMEM;
}
}
bv.bv_page = con->bounce_page;
bv.bv_offset = 0;
}
set_in_bvec(con, &bv);
con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
return 0;
}
static void prepare_read_data_cont(struct ceph_connection *con)
{
struct bio_vec bv;
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
con->in_data_crc = crc32c(con->in_data_crc,
page_address(con->bounce_page),
con->v2.in_bvec.bv_len);
get_bvec_at(&con->v2.in_cursor, &bv);
memcpy_to_page(bv.bv_page, bv.bv_offset,
page_address(con->bounce_page),
con->v2.in_bvec.bv_len);
} else {
con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
con->v2.in_bvec.bv_page,
con->v2.in_bvec.bv_offset,
con->v2.in_bvec.bv_len);
}
ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
if (con->v2.in_cursor.total_resid) {
get_bvec_at(&con->v2.in_cursor, &bv);
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
bv.bv_page = con->bounce_page;
bv.bv_offset = 0;
}
set_in_bvec(con, &bv);
WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
return;
}
/*
* We've read all data. Prepare to read epilogue.
*/
reset_in_kvecs(con);
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
static int prepare_sparse_read_cont(struct ceph_connection *con)
{
int ret;
struct bio_vec bv;
char *buf = NULL;
struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);
if (iov_iter_is_bvec(&con->v2.in_iter)) {
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
con->in_data_crc = crc32c(con->in_data_crc,
page_address(con->bounce_page),
con->v2.in_bvec.bv_len);
get_bvec_at(cursor, &bv);
memcpy_to_page(bv.bv_page, bv.bv_offset,
page_address(con->bounce_page),
con->v2.in_bvec.bv_len);
} else {
con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
con->v2.in_bvec.bv_page,
con->v2.in_bvec.bv_offset,
con->v2.in_bvec.bv_len);
}
ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
cursor->sr_resid -= con->v2.in_bvec.bv_len;
dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
con->v2.in_bvec.bv_len, cursor->sr_resid);
WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
if (cursor->sr_resid) {
get_bvec_at(cursor, &bv);
if (bv.bv_len > cursor->sr_resid)
bv.bv_len = cursor->sr_resid;
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
bv.bv_page = con->bounce_page;
bv.bv_offset = 0;
}
set_in_bvec(con, &bv);
con->v2.data_len_remain -= bv.bv_len;
return 0;
}
} else if (iov_iter_is_kvec(&con->v2.in_iter)) {
/* On first call, we have no kvec so don't compute crc */
if (con->v2.in_kvec_cnt) {
WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
con->in_data_crc = crc32c(con->in_data_crc,
con->v2.in_kvecs[0].iov_base,
con->v2.in_kvecs[0].iov_len);
}
} else {
return -EIO;
}
/* get next extent */
ret = con->ops->sparse_read(con, cursor, &buf);
if (ret <= 0) {
if (ret < 0)
return ret;
reset_in_kvecs(con);
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
return 0;
}
if (buf) {
/* receive into buffer */
reset_in_kvecs(con);
add_in_kvec(con, buf, ret);
con->v2.data_len_remain -= ret;
return 0;
}
if (ret > cursor->total_resid) {
pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
__func__, ret, cursor->total_resid, cursor->resid);
return -EIO;
}
get_bvec_at(cursor, &bv);
if (bv.bv_len > cursor->sr_resid)
bv.bv_len = cursor->sr_resid;
if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
if (unlikely(!con->bounce_page)) {
con->bounce_page = alloc_page(GFP_NOIO);
if (!con->bounce_page) {
pr_err("failed to allocate bounce page\n");
return -ENOMEM;
}
}
bv.bv_page = con->bounce_page;
bv.bv_offset = 0;
}
set_in_bvec(con, &bv);
con->v2.data_len_remain -= ret;
return ret;
}
static int prepare_sparse_read_data(struct ceph_connection *con)
{
struct ceph_msg *msg = con->in_msg;
dout("%s: starting sparse read\n", __func__);
if (WARN_ON_ONCE(!con->ops->sparse_read))
return -EOPNOTSUPP;
if (!con_secure(con))
con->in_data_crc = -1;
reset_in_kvecs(con);
con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
con->v2.data_len_remain = data_len(msg);
return prepare_sparse_read_cont(con);
}
static int prepare_read_tail_plain(struct ceph_connection *con)
{
struct ceph_msg *msg = con->in_msg;
if (!front_len(msg) && !middle_len(msg)) {
WARN_ON(!data_len(msg));
return prepare_read_data(con);
}
reset_in_kvecs(con);
if (front_len(msg)) {
add_in_kvec(con, msg->front.iov_base, front_len(msg));
WARN_ON(msg->front.iov_len != front_len(msg));
}
if (middle_len(msg)) {
add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
}
if (data_len(msg)) {
if (msg->sparse_read)
con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
else
con->v2.in_state = IN_S_PREPARE_READ_DATA;
} else {
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
return 0;
}
static void prepare_read_enc_page(struct ceph_connection *con)
{
struct bio_vec bv;
dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
con->v2.in_enc_resid);
WARN_ON(!con->v2.in_enc_resid);
bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);
set_in_bvec(con, &bv);
con->v2.in_enc_i++;
con->v2.in_enc_resid -= bv.bv_len;
if (con->v2.in_enc_resid) {
con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
return;
}
/*
* We are set to read the last piece of ciphertext (ending
* with epilogue) + auth tag.
*/
WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
static int prepare_read_tail_secure(struct ceph_connection *con)
{
struct page **enc_pages;
int enc_page_cnt;
int tail_len;
tail_len = tail_onwire_len(con->in_msg, true);
WARN_ON(!tail_len);
enc_page_cnt = calc_pages_for(0, tail_len);
enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
if (IS_ERR(enc_pages))
return PTR_ERR(enc_pages);
WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
con->v2.in_enc_pages = enc_pages;
con->v2.in_enc_page_cnt = enc_page_cnt;
con->v2.in_enc_resid = tail_len;
con->v2.in_enc_i = 0;
prepare_read_enc_page(con);
return 0;
}
static void __finish_skip(struct ceph_connection *con)
{
con->in_seq++;
prepare_read_preamble(con);
}
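
/*
 * The incoming message is being skipped (old seq# or no in_msg from
 * the alloc hook): discard its onwire tail, if any, and go back to
 * reading preambles.
 */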
static void prepare_skip_message(struct ceph_connection *con)
{
struct ceph_frame_desc *desc = &con->v2.in_desc;
int tail_len;
dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
desc->fd_lens[2], desc->fd_lens[3]);
tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
desc->fd_lens[3], con_secure(con));
if (!tail_len) {
__finish_skip(con);
} else {
set_in_skip(con, tail_len);
con->v2.in_state = IN_S_FINISH_SKIP;
}
}
static int process_banner_prefix(struct ceph_connection *con)
{
int payload_len;
void *p;
WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
p = con->v2.in_kvecs[0].iov_base;
if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
con->error_msg = "server is speaking msgr1 protocol";
else
con->error_msg = "protocol error, bad banner";
return -EINVAL;
}
p += CEPH_BANNER_V2_LEN;
payload_len = ceph_decode_16(&p);
dout("%s con %p payload_len %d\n", __func__, con, payload_len);
return prepare_read_banner_payload(con, payload_len);
}
static int process_banner_payload(struct ceph_connection *con)
{
void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
u64 server_feat, server_req_feat;
void *p;
int ret;
p = con->v2.in_kvecs[0].iov_base;
ceph_decode_64_safe(&p, end, server_feat, bad);
ceph_decode_64_safe(&p, end, server_req_feat, bad);
dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
__func__, con, server_feat, server_req_feat);
if (req_feat & ~server_feat) {
pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
return -EINVAL;
}
if (server_req_feat & ~feat) {
pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
feat, server_req_feat & ~feat);
con->error_msg = "missing required protocol features";
return -EINVAL;
}
/* no reset_out_kvecs() as our banner may still be pending */
ret = prepare_hello(con);
if (ret) {
pr_err("prepare_hello failed: %d\n", ret);
return ret;
}
con->state = CEPH_CON_S_V2_HELLO;
prepare_read_preamble(con);
return 0;
bad:
pr_err("failed to decode banner payload\n");
return -EINVAL;
}
static int process_hello(struct ceph_connection *con, void *p, void *end)
{
struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
struct ceph_entity_addr addr_for_me;
u8 entity_type;
int ret;
if (con->state != CEPH_CON_S_V2_HELLO) {
con->error_msg = "protocol error, unexpected hello";
return -EINVAL;
}
ceph_decode_8_safe(&p, end, entity_type, bad);
ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
if (ret) {
pr_err("failed to decode addr_for_me: %d\n", ret);
return ret;
}
dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
entity_type, ceph_pr_addr(&addr_for_me));
if (entity_type != con->peer_name.type) {
pr_err("bad peer type, want %d, got %d\n",
con->peer_name.type, entity_type);
con->error_msg = "wrong peer at address";
return -EINVAL;
}
	/*
	 * Set our address to the address our first peer (i.e. monitor)
	 * sees us connecting from.  If we are behind some sort of NAT
	 * and want to be identified by some private (not NATed)
	 * address, the ip option should be used.
	 */
if (ceph_addr_is_blank(my_addr)) {
memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
sizeof(my_addr->in_addr));
ceph_addr_set_port(my_addr, 0);
dout("%s con %p set my addr %s, as seen by peer %s\n",
__func__, con, ceph_pr_addr(my_addr),
ceph_pr_addr(&con->peer_addr));
} else {
dout("%s con %p my addr already set %s\n",
__func__, con, ceph_pr_addr(my_addr));
}
WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
WARN_ON(!my_addr->nonce);
/* no reset_out_kvecs() as our hello may still be pending */
ret = prepare_auth_request(con);
if (ret) {
if (ret != -EAGAIN)
pr_err("prepare_auth_request failed: %d\n", ret);
return ret;
}
con->state = CEPH_CON_S_V2_AUTH;
return 0;
bad:
pr_err("failed to decode hello\n");
return -EINVAL;
}
static int process_auth_bad_method(struct ceph_connection *con,
void *p, void *end)
{
int allowed_protos[8], allowed_modes[8];
int allowed_proto_cnt, allowed_mode_cnt;
int used_proto, result;
int ret;
int i;
if (con->state != CEPH_CON_S_V2_AUTH) {
con->error_msg = "protocol error, unexpected auth_bad_method";
return -EINVAL;
}
ceph_decode_32_safe(&p, end, used_proto, bad);
ceph_decode_32_safe(&p, end, result, bad);
dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
result);
ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
return -EINVAL;
}
for (i = 0; i < allowed_proto_cnt; i++) {
ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
i, allowed_protos[i]);
}
ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
return -EINVAL;
}
for (i = 0; i < allowed_mode_cnt; i++) {
ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
i, allowed_modes[i]);
}
mutex_unlock(&con->mutex);
ret = con->ops->handle_auth_bad_method(con, used_proto, result,
allowed_protos,
allowed_proto_cnt,
allowed_modes,
allowed_mode_cnt);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V2_AUTH) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
return -EAGAIN;
}
dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
return ret;
bad:
pr_err("failed to decode auth_bad_method\n");
return -EINVAL;
}
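
/*
 * The server wants another round of the auth exchange: pass its
 * payload to prepare_auth_request_more() and queue the reply frame.
 */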
static int process_auth_reply_more(struct ceph_connection *con,
void *p, void *end)
{
int payload_len;
int ret;
if (con->state != CEPH_CON_S_V2_AUTH) {
con->error_msg = "protocol error, unexpected auth_reply_more";
return -EINVAL;
}
ceph_decode_32_safe(&p, end, payload_len, bad);
ceph_decode_need(&p, end, payload_len, bad);
dout("%s con %p payload_len %d\n", __func__, con, payload_len);
reset_out_kvecs(con);
ret = prepare_auth_request_more(con, p, payload_len);
if (ret) {
if (ret != -EAGAIN)
pr_err("prepare_auth_request_more failed: %d\n", ret);
return ret;
}
return 0;
bad:
pr_err("failed to decode auth_reply_more\n");
return -EINVAL;
}
/*
* Align session_key and con_secret to avoid GFP_ATOMIC allocation
* inside crypto_shash_setkey() and crypto_aead_setkey() called from
* setup_crypto(). __aligned(16) isn't guaranteed to work for stack
* objects, so do it by hand.
*/
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
{
u8 session_key_buf[CEPH_KEY_LEN + 16];
u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
int session_key_len, con_secret_len;
int payload_len;
u64 global_id;
int ret;
if (con->state != CEPH_CON_S_V2_AUTH) {
con->error_msg = "protocol error, unexpected auth_done";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, global_id, bad);
ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
ceph_decode_32_safe(&p, end, payload_len, bad);
dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
__func__, con, global_id, con->v2.con_mode, payload_len);
mutex_unlock(&con->mutex);
session_key_len = 0;
con_secret_len = 0;
ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
session_key, &session_key_len,
con_secret, &con_secret_len);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V2_AUTH) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
ret = -EAGAIN;
goto out;
}
dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
if (ret)
goto out;
ret = setup_crypto(con, session_key, session_key_len, con_secret,
con_secret_len);
if (ret)
goto out;
reset_out_kvecs(con);
ret = prepare_auth_signature(con);
if (ret) {
pr_err("prepare_auth_signature failed: %d\n", ret);
goto out;
}
con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
out:
memzero_explicit(session_key_buf, sizeof(session_key_buf));
memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
return ret;
bad:
pr_err("failed to decode auth_done\n");
return -EINVAL;
}
static int process_auth_signature(struct ceph_connection *con,
void *p, void *end)
{
u8 hmac[SHA256_DIGEST_SIZE];
int ret;
if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
con->error_msg = "protocol error, unexpected auth_signature";
return -EINVAL;
}
ret = hmac_sha256(con, con->v2.out_sign_kvecs,
con->v2.out_sign_kvec_cnt, hmac);
if (ret)
return ret;
ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
con->error_msg = "integrity error, bad auth signature";
return -EBADMSG;
}
dout("%s con %p auth signature ok\n", __func__, con);
/* no reset_out_kvecs() as our auth_signature may still be pending */
if (!con->v2.server_cookie) {
ret = prepare_client_ident(con);
if (ret) {
pr_err("prepare_client_ident failed: %d\n", ret);
return ret;
}
con->state = CEPH_CON_S_V2_SESSION_CONNECT;
} else {
ret = prepare_session_reconnect(con);
if (ret) {
pr_err("prepare_session_reconnect failed: %d\n", ret);
return ret;
}
con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
}
return 0;
bad:
pr_err("failed to decode auth_signature\n");
return -EINVAL;
}
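
/*
 * Final step of connection establishment: check that the server is
 * who we meant to talk to and supports our required features, record
 * its cookie, global id and global seq, and move the connection to
 * OPEN.
 */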
static int process_server_ident(struct ceph_connection *con,
void *p, void *end)
{
struct ceph_client *client = from_msgr(con->msgr);
u64 features, required_features;
struct ceph_entity_addr addr;
u64 global_seq;
u64 global_id;
u64 cookie;
u64 flags;
int ret;
if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
con->error_msg = "protocol error, unexpected server_ident";
return -EINVAL;
}
ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
if (ret) {
pr_err("failed to decode server addrs: %d\n", ret);
return ret;
}
ceph_decode_64_safe(&p, end, global_id, bad);
ceph_decode_64_safe(&p, end, global_seq, bad);
ceph_decode_64_safe(&p, end, features, bad);
ceph_decode_64_safe(&p, end, required_features, bad);
ceph_decode_64_safe(&p, end, flags, bad);
ceph_decode_64_safe(&p, end, cookie, bad);
dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
__func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
global_id, global_seq, features, required_features, flags, cookie);
/* is this who we intended to talk to? */
if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
ceph_pr_addr(&con->peer_addr),
le32_to_cpu(con->peer_addr.nonce),
ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
con->error_msg = "wrong peer at address";
return -EINVAL;
}
if (client->required_features & ~features) {
pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
features, client->required_features & ~features);
con->error_msg = "missing required protocol features";
return -EINVAL;
}
	/*
	 * Both name->type and name->num are set in ceph_con_open() but
	 * name->num may be bogus in the initial monmap.  name->type is
	 * verified in process_hello().
	 */
WARN_ON(!con->peer_name.type);
con->peer_name.num = cpu_to_le64(global_id);
con->v2.peer_global_seq = global_seq;
con->peer_features = features;
WARN_ON(required_features & ~client->supported_features);
con->v2.server_cookie = cookie;
if (flags & CEPH_MSG_CONNECT_LOSSY) {
ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
WARN_ON(con->v2.server_cookie);
} else {
WARN_ON(!con->v2.server_cookie);
}
clear_in_sign_kvecs(con);
clear_out_sign_kvecs(con);
free_conn_bufs(con);
con->delay = 0; /* reset backoff memory */
con->state = CEPH_CON_S_OPEN;
con->v2.out_state = OUT_S_GET_NEXT;
return 0;
bad:
pr_err("failed to decode server_ident\n");
return -EINVAL;
}
static int process_ident_missing_features(struct ceph_connection *con,
void *p, void *end)
{
struct ceph_client *client = from_msgr(con->msgr);
u64 missing_features;
if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
con->error_msg = "protocol error, unexpected ident_missing_features";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, missing_features, bad);
pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
client->supported_features, missing_features);
con->error_msg = "missing required protocol features";
return -EINVAL;
bad:
pr_err("failed to decode ident_missing_features\n");
return -EINVAL;
}
static int process_session_reconnect_ok(struct ceph_connection *con,
void *p, void *end)
{
u64 seq;
if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
con->error_msg = "protocol error, unexpected session_reconnect_ok";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, seq, bad);
dout("%s con %p seq %llu\n", __func__, con, seq);
ceph_con_discard_requeued(con, seq);
clear_in_sign_kvecs(con);
clear_out_sign_kvecs(con);
free_conn_bufs(con);
con->delay = 0; /* reset backoff memory */
con->state = CEPH_CON_S_OPEN;
con->v2.out_state = OUT_S_GET_NEXT;
return 0;
bad:
pr_err("failed to decode session_reconnect_ok\n");
return -EINVAL;
}
static int process_session_retry(struct ceph_connection *con,
void *p, void *end)
{
u64 connect_seq;
int ret;
if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
con->error_msg = "protocol error, unexpected session_retry";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, connect_seq, bad);
dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
WARN_ON(connect_seq <= con->v2.connect_seq);
con->v2.connect_seq = connect_seq + 1;
free_conn_bufs(con);
reset_out_kvecs(con);
ret = prepare_session_reconnect(con);
if (ret) {
pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
return ret;
}
return 0;
bad:
pr_err("failed to decode session_retry\n");
return -EINVAL;
}
static int process_session_retry_global(struct ceph_connection *con,
void *p, void *end)
{
u64 global_seq;
int ret;
if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
con->error_msg = "protocol error, unexpected session_retry_global";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, global_seq, bad);
dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
WARN_ON(global_seq <= con->v2.global_seq);
con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
free_conn_bufs(con);
reset_out_kvecs(con);
ret = prepare_session_reconnect(con);
if (ret) {
pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
return ret;
}
return 0;
bad:
pr_err("failed to decode session_retry_global\n");
return -EINVAL;
}
static int process_session_reset(struct ceph_connection *con,
void *p, void *end)
{
bool full;
int ret;
if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
con->error_msg = "protocol error, unexpected session_reset";
return -EINVAL;
}
ceph_decode_8_safe(&p, end, full, bad);
if (!full) {
con->error_msg = "protocol error, bad session_reset";
return -EINVAL;
}
pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr));
ceph_con_reset_session(con);
mutex_unlock(&con->mutex);
if (con->ops->peer_reset)
con->ops->peer_reset(con);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
return -EAGAIN;
}
free_conn_bufs(con);
reset_out_kvecs(con);
ret = prepare_client_ident(con);
if (ret) {
pr_err("prepare_client_ident (rst) failed: %d\n", ret);
return ret;
}
con->state = CEPH_CON_S_V2_SESSION_CONNECT;
return 0;
bad:
pr_err("failed to decode session_reset\n");
return -EINVAL;
}
static int process_keepalive2_ack(struct ceph_connection *con,
void *p, void *end)
{
if (con->state != CEPH_CON_S_OPEN) {
con->error_msg = "protocol error, unexpected keepalive2_ack";
return -EINVAL;
}
ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
ceph_decode_timespec64(&con->last_keepalive_ack, p);
dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
return 0;
bad:
pr_err("failed to decode keepalive2_ack\n");
return -EINVAL;
}
static int process_ack(struct ceph_connection *con, void *p, void *end)
{
u64 seq;
if (con->state != CEPH_CON_S_OPEN) {
con->error_msg = "protocol error, unexpected ack";
return -EINVAL;
}
ceph_decode_64_safe(&p, end, seq, bad);
dout("%s con %p seq %llu\n", __func__, con, seq);
ceph_con_discard_sent(con, seq);
return 0;
bad:
pr_err("failed to decode ack\n");
return -EINVAL;
}
static int process_control(struct ceph_connection *con, void *p, void *end)
{
int tag = con->v2.in_desc.fd_tag;
int ret;
dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
switch (tag) {
case FRAME_TAG_HELLO:
ret = process_hello(con, p, end);
break;
case FRAME_TAG_AUTH_BAD_METHOD:
ret = process_auth_bad_method(con, p, end);
break;
case FRAME_TAG_AUTH_REPLY_MORE:
ret = process_auth_reply_more(con, p, end);
break;
case FRAME_TAG_AUTH_DONE:
ret = process_auth_done(con, p, end);
break;
case FRAME_TAG_AUTH_SIGNATURE:
ret = process_auth_signature(con, p, end);
break;
case FRAME_TAG_SERVER_IDENT:
ret = process_server_ident(con, p, end);
break;
case FRAME_TAG_IDENT_MISSING_FEATURES:
ret = process_ident_missing_features(con, p, end);
break;
case FRAME_TAG_SESSION_RECONNECT_OK:
ret = process_session_reconnect_ok(con, p, end);
break;
case FRAME_TAG_SESSION_RETRY:
ret = process_session_retry(con, p, end);
break;
case FRAME_TAG_SESSION_RETRY_GLOBAL:
ret = process_session_retry_global(con, p, end);
break;
case FRAME_TAG_SESSION_RESET:
ret = process_session_reset(con, p, end);
break;
case FRAME_TAG_KEEPALIVE2_ACK:
ret = process_keepalive2_ack(con, p, end);
break;
case FRAME_TAG_ACK:
ret = process_ack(con, p, end);
break;
default:
pr_err("bad tag %d\n", tag);
con->error_msg = "protocol error, bad tag";
return -EINVAL;
}
if (ret) {
dout("%s con %p error %d\n", __func__, con, ret);
return ret;
}
prepare_read_preamble(con);
return 0;
}
/*
* Return:
* 1 - con->in_msg set, read message
* 0 - skip message
* <0 - error
*/
static int process_message_header(struct ceph_connection *con,
void *p, void *end)
{
struct ceph_frame_desc *desc = &con->v2.in_desc;
struct ceph_msg_header2 *hdr2 = p;
struct ceph_msg_header hdr;
int skip;
int ret;
u64 seq;
/* verify seq# */
seq = le64_to_cpu(hdr2->seq);
if ((s64)seq - (s64)con->in_seq < 1) {
pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr),
seq, con->in_seq + 1);
return 0;
}
if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
con->error_msg = "bad message sequence # for incoming message";
return -EBADE;
}
ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
desc->fd_lens[3], &con->peer_name);
ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
if (ret)
return ret;
WARN_ON(!con->in_msg ^ skip);
if (skip)
return 0;
WARN_ON(!con->in_msg);
WARN_ON(con->in_msg->con != con);
return 1;
}
static int process_message(struct ceph_connection *con)
{
ceph_con_process_message(con);
/*
* We could have been closed by ceph_con_close() because
* ceph_con_process_message() temporarily drops con->mutex.
*/
if (con->state != CEPH_CON_S_OPEN) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
return -EAGAIN;
}
prepare_read_preamble(con);
return 0;
}
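
/*
 * Dispatch a fully received control body.  Non-message frames go to
 * process_control(); for message frames the header is validated here
 * and the tail (front/middle/data plus epilogue) is either queued for
 * reading or skipped.
 */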
static int __handle_control(struct ceph_connection *con, void *p)
{
void *end = p + con->v2.in_desc.fd_lens[0];
struct ceph_msg *msg;
int ret;
if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
return process_control(con, p, end);
ret = process_message_header(con, p, end);
if (ret < 0)
return ret;
if (ret == 0) {
prepare_skip_message(con);
return 0;
}
msg = con->in_msg; /* set in process_message_header() */
if (front_len(msg)) {
WARN_ON(front_len(msg) > msg->front_alloc_len);
msg->front.iov_len = front_len(msg);
} else {
msg->front.iov_len = 0;
}
if (middle_len(msg)) {
WARN_ON(middle_len(msg) > msg->middle->alloc_len);
msg->middle->vec.iov_len = middle_len(msg);
} else if (msg->middle) {
msg->middle->vec.iov_len = 0;
}
if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
return process_message(con);
if (con_secure(con))
return prepare_read_tail_secure(con);
return prepare_read_tail_plain(con);
}
static int handle_preamble(struct ceph_connection *con)
{
struct ceph_frame_desc *desc = &con->v2.in_desc;
int ret;
if (con_secure(con)) {
ret = decrypt_preamble(con);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad preamble auth tag";
return ret;
}
}
ret = decode_preamble(con->v2.in_buf, desc);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad crc";
else
con->error_msg = "protocol error, bad preamble";
return ret;
}
dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
if (!con_secure(con))
return prepare_read_control(con);
if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
return prepare_read_control_remainder(con);
return __handle_control(con, CTRL_BODY(con->v2.in_buf));
}
static int handle_control(struct ceph_connection *con)
{
int ctrl_len = con->v2.in_desc.fd_lens[0];
void *buf;
int ret;
WARN_ON(con_secure(con));
ret = verify_control_crc(con);
if (ret) {
con->error_msg = "integrity error, bad crc";
return ret;
}
if (con->state == CEPH_CON_S_V2_AUTH) {
buf = alloc_conn_buf(con, ctrl_len);
if (!buf)
return -ENOMEM;
memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
return __handle_control(con, buf);
}
return __handle_control(con, con->v2.in_kvecs[0].iov_base);
}
static int handle_control_remainder(struct ceph_connection *con)
{
int ret;
WARN_ON(!con_secure(con));
ret = decrypt_control_remainder(con);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad control remainder auth tag";
return ret;
}
return __handle_control(con, con->v2.in_kvecs[0].iov_base -
CEPH_PREAMBLE_INLINE_LEN);
}
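
/*
 * The whole message tail has arrived.  In secure mode decrypt it and
 * check the late status; in plain mode verify the per-section crcs
 * carried in the epilogue.  Then deliver the message.
 */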
static int handle_epilogue(struct ceph_connection *con)
{
u32 front_crc, middle_crc, data_crc;
int ret;
if (con_secure(con)) {
ret = decrypt_tail(con);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad epilogue auth tag";
return ret;
}
/* just late_status */
ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
if (ret) {
con->error_msg = "protocol error, bad epilogue";
return ret;
}
} else {
ret = decode_epilogue(con->v2.in_buf, &front_crc,
&middle_crc, &data_crc);
if (ret) {
con->error_msg = "protocol error, bad epilogue";
return ret;
}
ret = verify_epilogue_crcs(con, front_crc, middle_crc,
data_crc);
if (ret) {
con->error_msg = "integrity error, bad crc";
return ret;
}
}
return process_message(con);
}
static void finish_skip(struct ceph_connection *con)
{
dout("%s con %p\n", __func__, con);
if (con_secure(con))
gcm_inc_nonce(&con->v2.in_gcm_nonce);
__finish_skip(con);
}
static int populate_in_iter(struct ceph_connection *con)
{
int ret;
dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
con->v2.in_state);
WARN_ON(iov_iter_count(&con->v2.in_iter));
if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
ret = process_banner_prefix(con);
} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
ret = process_banner_payload(con);
} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
con->state == CEPH_CON_S_OPEN) {
switch (con->v2.in_state) {
case IN_S_HANDLE_PREAMBLE:
ret = handle_preamble(con);
break;
case IN_S_HANDLE_CONTROL:
ret = handle_control(con);
break;
case IN_S_HANDLE_CONTROL_REMAINDER:
ret = handle_control_remainder(con);
break;
case IN_S_PREPARE_READ_DATA:
ret = prepare_read_data(con);
break;
case IN_S_PREPARE_READ_DATA_CONT:
prepare_read_data_cont(con);
ret = 0;
break;
case IN_S_PREPARE_READ_ENC_PAGE:
prepare_read_enc_page(con);
ret = 0;
break;
case IN_S_PREPARE_SPARSE_DATA:
ret = prepare_sparse_read_data(con);
break;
case IN_S_PREPARE_SPARSE_DATA_CONT:
ret = prepare_sparse_read_cont(con);
break;
case IN_S_HANDLE_EPILOGUE:
ret = handle_epilogue(con);
break;
case IN_S_FINISH_SKIP:
finish_skip(con);
ret = 0;
break;
default:
WARN(1, "bad in_state %d", con->v2.in_state);
return -EINVAL;
}
} else {
WARN(1, "bad state %d", con->state);
return -EINVAL;
}
if (ret) {
dout("%s con %p error %d\n", __func__, con, ret);
return ret;
}
if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
return -ENODATA;
dout("%s con %p populated %zu\n", __func__, con,
iov_iter_count(&con->v2.in_iter));
return 1;
}
int ceph_con_v2_try_read(struct ceph_connection *con)
{
int ret;
dout("%s con %p state %d need %zu\n", __func__, con, con->state,
iov_iter_count(&con->v2.in_iter));
if (con->state == CEPH_CON_S_PREOPEN)
return 0;
	/*
	 * We should always have something pending here.  If not, WARN
	 * and bail out instead of entering the read loop: with an empty
	 * in_iter, ceph_tcp_recv() would immediately return 1 and
	 * populate_in_iter() would be called as if we had read something.
	 */
if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
return -ENODATA;
for (;;) {
ret = ceph_tcp_recv(con);
if (ret <= 0)
return ret;
ret = populate_in_iter(con);
if (ret <= 0) {
if (ret && ret != -EAGAIN && !con->error_msg)
con->error_msg = "read processing error";
return ret;
}
}
}
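
/*
 * Start streaming the data section of the outgoing message: reset the
 * data crc, set up the data cursor and queue the first bvec.
 */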
static void queue_data(struct ceph_connection *con)
{
struct bio_vec bv;
con->v2.out_epil.data_crc = -1;
ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
data_len(con->out_msg));
get_bvec_at(&con->v2.out_cursor, &bv);
set_out_bvec(con, &bv, true);
con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
}
static void queue_data_cont(struct ceph_connection *con)
{
struct bio_vec bv;
con->v2.out_epil.data_crc = ceph_crc32c_page(
con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
if (con->v2.out_cursor.total_resid) {
get_bvec_at(&con->v2.out_cursor, &bv);
set_out_bvec(con, &bv, true);
WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
return;
}
/*
* We've written all data. Queue epilogue. Once it's written,
* we are done.
*/
reset_out_kvecs(con);
prepare_epilogue_plain(con, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
static void queue_enc_page(struct ceph_connection *con)
{
struct bio_vec bv;
dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
con->v2.out_enc_resid);
WARN_ON(!con->v2.out_enc_resid);
bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
set_out_bvec(con, &bv, false);
con->v2.out_enc_i++;
con->v2.out_enc_resid -= bv.bv_len;
if (con->v2.out_enc_resid) {
WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
return;
}
/*
* We've queued the last piece of ciphertext (ending with
* epilogue) + auth tag. Once it's written, we are done.
*/
WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
static void queue_zeros(struct ceph_connection *con)
{
dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
if (con->v2.out_zero) {
set_out_bvec_zero(con);
con->v2.out_zero -= con->v2.out_bvec.bv_len;
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
/*
* We've zero-filled everything up to epilogue. Queue epilogue
* with late_status set to ABORTED and crcs adjusted for zeros.
* Once it's written, we are done patching up for the revoke.
*/
reset_out_kvecs(con);
prepare_epilogue_plain(con, true);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
static void finish_message(struct ceph_connection *con)
{
dout("%s con %p msg %p\n", __func__, con, con->out_msg);
	/* we end up here in both plain and secure modes */
if (con->v2.out_enc_pages) {
WARN_ON(!con->v2.out_enc_page_cnt);
ceph_release_page_vector(con->v2.out_enc_pages,
con->v2.out_enc_page_cnt);
con->v2.out_enc_pages = NULL;
con->v2.out_enc_page_cnt = 0;
}
/* message may have been revoked */
if (con->out_msg) {
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
}
con->v2.out_state = OUT_S_GET_NEXT;
}
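
/*
 * Refill out_iter: finish or continue whatever the out_state machine
 * was doing, then queue the next keepalive, message or ack.  Returns
 * 1 if something was queued, 0 if nothing is pending, or a negative
 * error.
 */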
static int populate_out_iter(struct ceph_connection *con)
{
int ret;
dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
con->v2.out_state);
WARN_ON(iov_iter_count(&con->v2.out_iter));
if (con->state != CEPH_CON_S_OPEN) {
WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
goto nothing_pending;
}
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
WARN_ON(!con->out_msg);
queue_data(con);
goto populated;
case OUT_S_QUEUE_DATA_CONT:
WARN_ON(!con->out_msg);
queue_data_cont(con);
goto populated;
case OUT_S_QUEUE_ENC_PAGE:
queue_enc_page(con);
goto populated;
case OUT_S_QUEUE_ZEROS:
WARN_ON(con->out_msg); /* revoked */
queue_zeros(con);
goto populated;
case OUT_S_FINISH_MESSAGE:
finish_message(con);
break;
case OUT_S_GET_NEXT:
break;
default:
WARN(1, "bad out_state %d", con->v2.out_state);
return -EINVAL;
}
WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
ret = prepare_keepalive2(con);
if (ret) {
pr_err("prepare_keepalive2 failed: %d\n", ret);
return ret;
}
} else if (!list_empty(&con->out_queue)) {
ceph_con_get_out_msg(con);
ret = prepare_message(con);
if (ret) {
pr_err("prepare_message failed: %d\n", ret);
return ret;
}
} else if (con->in_seq > con->in_seq_acked) {
ret = prepare_ack(con);
if (ret) {
pr_err("prepare_ack failed: %d\n", ret);
return ret;
}
} else {
goto nothing_pending;
}
populated:
if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
return -ENODATA;
dout("%s con %p populated %zu\n", __func__, con,
iov_iter_count(&con->v2.out_iter));
return 1;
nothing_pending:
WARN_ON(iov_iter_count(&con->v2.out_iter));
dout("%s con %p nothing pending\n", __func__, con);
ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
return 0;
}
int ceph_con_v2_try_write(struct ceph_connection *con)
{
int ret;
dout("%s con %p state %d have %zu\n", __func__, con, con->state,
iov_iter_count(&con->v2.out_iter));
/* open the socket first? */
if (con->state == CEPH_CON_S_PREOPEN) {
WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
/*
* Always bump global_seq. Bump connect_seq only if
* there is a session (i.e. we are reconnecting and will
* send session_reconnect instead of client_ident).
*/
con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
if (con->v2.server_cookie)
con->v2.connect_seq++;
ret = prepare_read_banner_prefix(con);
if (ret) {
pr_err("prepare_read_banner_prefix failed: %d\n", ret);
con->error_msg = "connect error";
return ret;
}
reset_out_kvecs(con);
ret = prepare_banner(con);
if (ret) {
pr_err("prepare_banner failed: %d\n", ret);
con->error_msg = "connect error";
return ret;
}
ret = ceph_tcp_connect(con);
if (ret) {
pr_err("ceph_tcp_connect failed: %d\n", ret);
con->error_msg = "connect error";
return ret;
}
}
if (!iov_iter_count(&con->v2.out_iter)) {
ret = populate_out_iter(con);
if (ret <= 0) {
if (ret && ret != -EAGAIN && !con->error_msg)
con->error_msg = "write processing error";
return ret;
}
}
tcp_sock_set_cork(con->sock->sk, true);
for (;;) {
ret = ceph_tcp_send(con);
if (ret <= 0)
break;
ret = populate_out_iter(con);
if (ret <= 0) {
if (ret && ret != -EAGAIN && !con->error_msg)
con->error_msg = "write processing error";
break;
}
}
tcp_sock_set_cork(con->sock->sk, false);
return ret;
}
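
/*
 * crc32c of @zero_len zero bytes, computed by running the shared zero
 * page through the crc a page at a time.
 */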
static u32 crc32c_zeros(u32 crc, int zero_len)
{
int len;
while (zero_len) {
len = min(zero_len, (int)PAGE_SIZE);
crc = crc32c(crc, page_address(ceph_zero_page), len);
zero_len -= len;
}
return crc;
}
static void prepare_zero_front(struct ceph_connection *con, int resid)
{
int sent;
WARN_ON(!resid || resid > front_len(con->out_msg));
sent = front_len(con->out_msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.front_crc =
crc32c(-1, con->out_msg->front.iov_base, sent);
con->v2.out_epil.front_crc =
crc32c_zeros(con->v2.out_epil.front_crc, resid);
} else {
con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
}
con->v2.out_iter.count -= resid;
out_zero_add(con, resid);
}
static void prepare_zero_middle(struct ceph_connection *con, int resid)
{
int sent;
WARN_ON(!resid || resid > middle_len(con->out_msg));
sent = middle_len(con->out_msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.middle_crc =
crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
con->v2.out_epil.middle_crc =
crc32c_zeros(con->v2.out_epil.middle_crc, resid);
} else {
con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
}
con->v2.out_iter.count -= resid;
out_zero_add(con, resid);
}
static void prepare_zero_data(struct ceph_connection *con)
{
dout("%s con %p\n", __func__, con);
con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
out_zero_add(con, data_len(con->out_msg));
}
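
/*
 * The outgoing message was revoked before any data bytes went out.
 * Depending on how far we got (still sending the head, the front or
 * the middle), account crcs for what was already sent and substitute
 * zeros for the unsent rest of front/middle and for all of the data.
 */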
static void revoke_at_queue_data(struct ceph_connection *con)
{
int boundary;
int resid;
WARN_ON(!data_len(con->out_msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
boundary = front_len(con->out_msg) + middle_len(con->out_msg);
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
if (front_len(con->out_msg))
prepare_zero_front(con, front_len(con->out_msg));
if (middle_len(con->out_msg))
prepare_zero_middle(con, middle_len(con->out_msg));
prepare_zero_data(con);
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
boundary = middle_len(con->out_msg);
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
prepare_zero_front(con, resid);
if (middle_len(con->out_msg))
prepare_zero_middle(con, middle_len(con->out_msg));
prepare_zero_data(con);
queue_zeros(con);
return;
}
WARN_ON(!resid);
dout("%s con %p was sending middle\n", __func__, con);
prepare_zero_middle(con, resid);
prepare_zero_data(con);
queue_zeros(con);
}
static void revoke_at_queue_data_cont(struct ceph_connection *con)
{
int sent, resid; /* current piece of data */
WARN_ON(!data_len(con->out_msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
sent = con->v2.out_bvec.bv_len - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.data_crc = ceph_crc32c_page(
con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
con->v2.out_bvec.bv_offset, sent);
ceph_msg_data_advance(&con->v2.out_cursor, sent);
}
WARN_ON(resid > con->v2.out_cursor.total_resid);
con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
con->v2.out_cursor.total_resid);
con->v2.out_iter.count -= resid;
out_zero_add(con, con->v2.out_cursor.total_resid);
queue_zeros(con);
}
static void revoke_at_finish_message(struct ceph_connection *con)
{
int boundary;
int resid;
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
!data_len(con->out_msg)) {
WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head (empty message) - noop\n",
__func__, con);
return;
}
boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
if (front_len(con->out_msg))
prepare_zero_front(con, front_len(con->out_msg));
if (middle_len(con->out_msg))
prepare_zero_middle(con, middle_len(con->out_msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
prepare_zero_front(con, resid);
if (middle_len(con->out_msg))
prepare_zero_middle(con, middle_len(con->out_msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
queue_zeros(con);
return;
}
boundary = CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending middle\n", __func__, con);
prepare_zero_middle(con, resid);
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
queue_zeros(con);
return;
}
WARN_ON(!resid);
dout("%s con %p was sending epilogue - noop\n", __func__, con);
}
void ceph_con_v2_revoke(struct ceph_connection *con)
{
WARN_ON(con->v2.out_zero);
if (con_secure(con)) {
WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
con->v2.out_state != OUT_S_FINISH_MESSAGE);
dout("%s con %p secure - noop\n", __func__, con);
return;
}
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
revoke_at_queue_data(con);
break;
case OUT_S_QUEUE_DATA_CONT:
revoke_at_queue_data_cont(con);
break;
case OUT_S_FINISH_MESSAGE:
revoke_at_finish_message(con);
break;
default:
WARN(1, "bad out_state %d", con->v2.out_state);
break;
}
}
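
/*
 * Incoming message revoked before its data section was read: skip
 * whatever is still outstanding in the current iter plus the entire
 * data section and the plain epilogue.
 */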
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
int remaining;
int resid;
WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
remaining);
con->v2.in_iter.count -= resid;
set_in_skip(con, resid + remaining);
con->v2.in_state = IN_S_FINISH_SKIP;
}
static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
int recved, resid; /* current piece of data */
int remaining;
WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
recved = con->v2.in_bvec.bv_len - resid;
dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
if (recved)
ceph_msg_data_advance(&con->v2.in_cursor, recved);
WARN_ON(resid > con->v2.in_cursor.total_resid);
remaining = CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
con->v2.in_cursor.total_resid, remaining);
con->v2.in_iter.count -= resid;
set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
con->v2.in_state = IN_S_FINISH_SKIP;
}
static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
{
int resid; /* current enc page (not necessarily data) */
WARN_ON(!con_secure(con));
WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
con->v2.in_enc_resid);
con->v2.in_iter.count -= resid;
set_in_skip(con, resid + con->v2.in_enc_resid);
con->v2.in_state = IN_S_FINISH_SKIP;
}
static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
{
int resid; /* current piece of data */
int remaining;
WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
dout("%s con %p resid %d\n", __func__, con, resid);
remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
con->v2.in_iter.count -= resid;
set_in_skip(con, resid + remaining);
con->v2.in_state = IN_S_FINISH_SKIP;
}
static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
int resid;
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
dout("%s con %p resid %d\n", __func__, con, resid);
con->v2.in_iter.count -= resid;
set_in_skip(con, resid);
con->v2.in_state = IN_S_FINISH_SKIP;
}
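
/*
 * Revoke the message currently being received: whatever data state we
 * were in, switch to skipping the rest of its onwire tail.
 */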
void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
{
switch (con->v2.in_state) {
case IN_S_PREPARE_SPARSE_DATA:
case IN_S_PREPARE_READ_DATA:
revoke_at_prepare_read_data(con);
break;
case IN_S_PREPARE_READ_DATA_CONT:
revoke_at_prepare_read_data_cont(con);
break;
case IN_S_PREPARE_READ_ENC_PAGE:
revoke_at_prepare_read_enc_page(con);
break;
case IN_S_PREPARE_SPARSE_DATA_CONT:
revoke_at_prepare_sparse_data(con);
break;
case IN_S_HANDLE_EPILOGUE:
revoke_at_handle_epilogue(con);
break;
default:
WARN(1, "bad in_state %d", con->v2.in_state);
break;
}
}
bool ceph_con_v2_opened(struct ceph_connection *con)
{
return con->v2.peer_global_seq;
}
void ceph_con_v2_reset_session(struct ceph_connection *con)
{
con->v2.client_cookie = 0;
con->v2.server_cookie = 0;
con->v2.global_seq = 0;
con->v2.connect_seq = 0;
con->v2.peer_global_seq = 0;
}
void ceph_con_v2_reset_protocol(struct ceph_connection *con)
{
iov_iter_truncate(&con->v2.in_iter, 0);
iov_iter_truncate(&con->v2.out_iter, 0);
con->v2.out_zero = 0;
clear_in_sign_kvecs(con);
clear_out_sign_kvecs(con);
free_conn_bufs(con);
if (con->v2.in_enc_pages) {
WARN_ON(!con->v2.in_enc_page_cnt);
ceph_release_page_vector(con->v2.in_enc_pages,
con->v2.in_enc_page_cnt);
con->v2.in_enc_pages = NULL;
con->v2.in_enc_page_cnt = 0;
}
if (con->v2.out_enc_pages) {
WARN_ON(!con->v2.out_enc_page_cnt);
ceph_release_page_vector(con->v2.out_enc_pages,
con->v2.out_enc_page_cnt);
con->v2.out_enc_pages = NULL;
con->v2.out_enc_page_cnt = 0;
}
con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
if (con->v2.hmac_tfm) {
crypto_free_shash(con->v2.hmac_tfm);
con->v2.hmac_tfm = NULL;
}
if (con->v2.gcm_req) {
aead_request_free(con->v2.gcm_req);
con->v2.gcm_req = NULL;
}
if (con->v2.gcm_tfm) {
crypto_free_aead(con->v2.gcm_tfm);
con->v2.gcm_tfm = NULL;
}
}
| linux-master | net/ceph/messenger_v2.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
static __printf(2, 3)
void osdmap_info(const struct ceph_osdmap *map, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_INFO "%s (%pU e%u): %pV", KBUILD_MODNAME, &map->fsid,
map->epoch, &vaf);
va_end(args);
}
char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
if (!len)
return str;
if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
snprintf(str, len, "exists, up");
else if (state & CEPH_OSD_EXISTS)
snprintf(str, len, "exists");
else if (state & CEPH_OSD_UP)
snprintf(str, len, "up");
else
snprintf(str, len, "doesn't exist");
return str;
}
/* maps */
static int calc_bits_of(unsigned int t)
{
int b = 0;
while (t) {
t = t >> 1;
b++;
}
return b;
}
/*
 * foo_mask is the smallest value of the form 2^n-1 that is >= foo-1,
 * i.e. it covers pg indices 0..foo-1 (e.g. pg_num 12 -> pg_num_mask
 * 0xf, pg_num 8 -> pg_num_mask 0x7).
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
/*
* decode crush map
*/
static int crush_decode_uniform_bucket(void **p, void *end,
struct crush_bucket_uniform *b)
{
dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
b->item_weight = ceph_decode_32(p);
return 0;
bad:
return -EINVAL;
}
static int crush_decode_list_bucket(void **p, void *end,
struct crush_bucket_list *b)
{
int j;
dout("crush_decode_list_bucket %p to %p\n", *p, end);
b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->sum_weights == NULL)
return -ENOMEM;
ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
b->item_weights[j] = ceph_decode_32(p);
b->sum_weights[j] = ceph_decode_32(p);
}
return 0;
bad:
return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
struct crush_bucket_tree *b)
{
int j;
dout("crush_decode_tree_bucket %p to %p\n", *p, end);
ceph_decode_8_safe(p, end, b->num_nodes, bad);
b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
if (b->node_weights == NULL)
return -ENOMEM;
ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
for (j = 0; j < b->num_nodes; j++)
b->node_weights[j] = ceph_decode_32(p);
return 0;
bad:
return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
struct crush_bucket_straw *b)
{
int j;
dout("crush_decode_straw_bucket %p to %p\n", *p, end);
b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->straws == NULL)
return -ENOMEM;
ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
b->item_weights[j] = ceph_decode_32(p);
b->straws[j] = ceph_decode_32(p);
}
return 0;
bad:
return -EINVAL;
}
static int crush_decode_straw2_bucket(void **p, void *end,
struct crush_bucket_straw2 *b)
{
int j;
dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++)
b->item_weights[j] = ceph_decode_32(p);
return 0;
bad:
return -EINVAL;
}
struct crush_name_node {
struct rb_node cn_node;
int cn_id;
char cn_name[];
};
static struct crush_name_node *alloc_crush_name(size_t name_len)
{
struct crush_name_node *cn;
cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
if (!cn)
return NULL;
RB_CLEAR_NODE(&cn->cn_node);
return cn;
}
static void free_crush_name(struct crush_name_node *cn)
{
WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));
kfree(cn);
}
DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)
static int decode_crush_names(void **p, void *end, struct rb_root *root)
{
u32 n;
ceph_decode_32_safe(p, end, n, e_inval);
while (n--) {
struct crush_name_node *cn;
int id;
u32 name_len;
ceph_decode_32_safe(p, end, id, e_inval);
ceph_decode_32_safe(p, end, name_len, e_inval);
ceph_decode_need(p, end, name_len, e_inval);
cn = alloc_crush_name(name_len);
if (!cn)
return -ENOMEM;
cn->cn_id = id;
memcpy(cn->cn_name, *p, name_len);
cn->cn_name[name_len] = '\0';
*p += name_len;
if (!__insert_crush_name(root, cn)) {
free_crush_name(cn);
return -EEXIST;
}
}
return 0;
e_inval:
return -EINVAL;
}
void clear_crush_names(struct rb_root *root)
{
while (!RB_EMPTY_ROOT(root)) {
struct crush_name_node *cn =
rb_entry(rb_first(root), struct crush_name_node, cn_node);
erase_crush_name(root, cn);
free_crush_name(cn);
}
}
static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
struct crush_choose_arg_map *arg_map;
arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
if (!arg_map)
return NULL;
RB_CLEAR_NODE(&arg_map->node);
return arg_map;
}
static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
if (arg_map) {
int i, j;
WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
for (i = 0; i < arg_map->size; i++) {
struct crush_choose_arg *arg = &arg_map->args[i];
for (j = 0; j < arg->weight_set_size; j++)
kfree(arg->weight_set[j].weights);
kfree(arg->weight_set);
kfree(arg->ids);
}
kfree(arg_map->args);
kfree(arg_map);
}
}
DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
node);
void clear_choose_args(struct crush_map *c)
{
while (!RB_EMPTY_ROOT(&c->choose_args)) {
struct crush_choose_arg_map *arg_map =
rb_entry(rb_first(&c->choose_args),
struct crush_choose_arg_map, node);
erase_choose_arg_map(&c->choose_args, arg_map);
free_choose_arg_map(arg_map);
}
}
static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
u32 *a = NULL;
u32 len;
int ret;
ceph_decode_32_safe(p, end, len, e_inval);
if (len) {
u32 i;
a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
if (!a) {
ret = -ENOMEM;
goto fail;
}
ceph_decode_need(p, end, len * sizeof(u32), e_inval);
for (i = 0; i < len; i++)
a[i] = ceph_decode_32(p);
}
*plen = len;
return a;
e_inval:
ret = -EINVAL;
fail:
kfree(a);
return ERR_PTR(ret);
}
/*
* Assumes @arg is zero-initialized.
*/
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
int ret;
ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
if (arg->weight_set_size) {
u32 i;
arg->weight_set = kmalloc_array(arg->weight_set_size,
sizeof(*arg->weight_set),
GFP_NOIO);
if (!arg->weight_set)
return -ENOMEM;
for (i = 0; i < arg->weight_set_size; i++) {
struct crush_weight_set *w = &arg->weight_set[i];
w->weights = decode_array_32_alloc(p, end, &w->size);
if (IS_ERR(w->weights)) {
ret = PTR_ERR(w->weights);
w->weights = NULL;
return ret;
}
}
}
arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
if (IS_ERR(arg->ids)) {
ret = PTR_ERR(arg->ids);
arg->ids = NULL;
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
struct crush_choose_arg_map *arg_map = NULL;
u32 num_choose_arg_maps, num_buckets;
int ret;
ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
while (num_choose_arg_maps--) {
arg_map = alloc_choose_arg_map();
if (!arg_map) {
ret = -ENOMEM;
goto fail;
}
ceph_decode_64_safe(p, end, arg_map->choose_args_index,
e_inval);
arg_map->size = c->max_buckets;
arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
GFP_NOIO);
if (!arg_map->args) {
ret = -ENOMEM;
goto fail;
}
ceph_decode_32_safe(p, end, num_buckets, e_inval);
while (num_buckets--) {
struct crush_choose_arg *arg;
u32 bucket_index;
ceph_decode_32_safe(p, end, bucket_index, e_inval);
if (bucket_index >= arg_map->size)
goto e_inval;
arg = &arg_map->args[bucket_index];
ret = decode_choose_arg(p, end, arg);
if (ret)
goto fail;
if (arg->ids_size &&
arg->ids_size != c->buckets[bucket_index]->size)
goto e_inval;
}
insert_choose_arg_map(&c->choose_args, arg_map);
}
return 0;
e_inval:
ret = -EINVAL;
fail:
free_choose_arg_map(arg_map);
return ret;
}
static void crush_finalize(struct crush_map *c)
{
__s32 b;
/* Space for the array of pointers to per-bucket workspace */
c->working_size = sizeof(struct crush_work) +
c->max_buckets * sizeof(struct crush_work_bucket *);
for (b = 0; b < c->max_buckets; b++) {
if (!c->buckets[b])
continue;
switch (c->buckets[b]->alg) {
default:
/*
* The base case, permutation variables and
* the pointer to the permutation array.
*/
c->working_size += sizeof(struct crush_work_bucket);
break;
}
/* Every bucket has a permutation array. */
c->working_size += c->buckets[b]->size * sizeof(__u32);
}
}
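
/*
 * Decode a crush map: magic, buckets, rules, type and bucket names,
 * then (when present) tunables, per-class maps and choose_args.
 */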
static struct crush_map *crush_decode(void *pbyval, void *end)
{
struct crush_map *c;
int err;
int i, j;
void **p = &pbyval;
void *start = pbyval;
u32 magic;
dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
c = kzalloc(sizeof(*c), GFP_NOFS);
if (c == NULL)
return ERR_PTR(-ENOMEM);
c->type_names = RB_ROOT;
c->names = RB_ROOT;
c->choose_args = RB_ROOT;
/* set tunables to default values */
c->choose_local_tries = 2;
c->choose_local_fallback_tries = 5;
c->choose_total_tries = 19;
c->chooseleaf_descend_once = 0;
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
if (magic != CRUSH_MAGIC) {
pr_err("crush_decode magic %x != current %x\n",
(unsigned int)magic, (unsigned int)CRUSH_MAGIC);
goto bad;
}
c->max_buckets = ceph_decode_32(p);
c->max_rules = ceph_decode_32(p);
c->max_devices = ceph_decode_32(p);
c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
if (c->buckets == NULL)
goto badmem;
c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
if (c->rules == NULL)
goto badmem;
/* buckets */
for (i = 0; i < c->max_buckets; i++) {
int size = 0;
u32 alg;
struct crush_bucket *b;
ceph_decode_32_safe(p, end, alg, bad);
if (alg == 0) {
c->buckets[i] = NULL;
continue;
}
dout("crush_decode bucket %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
switch (alg) {
case CRUSH_BUCKET_UNIFORM:
size = sizeof(struct crush_bucket_uniform);
break;
case CRUSH_BUCKET_LIST:
size = sizeof(struct crush_bucket_list);
break;
case CRUSH_BUCKET_TREE:
size = sizeof(struct crush_bucket_tree);
break;
case CRUSH_BUCKET_STRAW:
size = sizeof(struct crush_bucket_straw);
break;
case CRUSH_BUCKET_STRAW2:
size = sizeof(struct crush_bucket_straw2);
break;
default:
goto bad;
}
BUG_ON(size == 0);
b = c->buckets[i] = kzalloc(size, GFP_NOFS);
if (b == NULL)
goto badmem;
ceph_decode_need(p, end, 4*sizeof(u32), bad);
b->id = ceph_decode_32(p);
b->type = ceph_decode_16(p);
b->alg = ceph_decode_8(p);
b->hash = ceph_decode_8(p);
b->weight = ceph_decode_32(p);
b->size = ceph_decode_32(p);
dout("crush_decode bucket size %d off %x %p to %p\n",
b->size, (int)(*p-start), *p, end);
b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
if (b->items == NULL)
goto badmem;
ceph_decode_need(p, end, b->size*sizeof(u32), bad);
for (j = 0; j < b->size; j++)
b->items[j] = ceph_decode_32(p);
switch (b->alg) {
case CRUSH_BUCKET_UNIFORM:
err = crush_decode_uniform_bucket(p, end,
(struct crush_bucket_uniform *)b);
if (err < 0)
goto fail;
break;
case CRUSH_BUCKET_LIST:
err = crush_decode_list_bucket(p, end,
(struct crush_bucket_list *)b);
if (err < 0)
goto fail;
break;
case CRUSH_BUCKET_TREE:
err = crush_decode_tree_bucket(p, end,
(struct crush_bucket_tree *)b);
if (err < 0)
goto fail;
break;
case CRUSH_BUCKET_STRAW:
err = crush_decode_straw_bucket(p, end,
(struct crush_bucket_straw *)b);
if (err < 0)
goto fail;
break;
case CRUSH_BUCKET_STRAW2:
err = crush_decode_straw2_bucket(p, end,
(struct crush_bucket_straw2 *)b);
if (err < 0)
goto fail;
break;
}
}
/* rules */
dout("rule vec is %p\n", c->rules);
for (i = 0; i < c->max_rules; i++) {
u32 yes;
struct crush_rule *r;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
dout("crush_decode NO rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
c->rules[i] = NULL;
continue;
}
dout("crush_decode rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
/* len */
ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
if (yes > (ULONG_MAX - sizeof(*r))
/ sizeof(struct crush_rule_step))
goto bad;
#endif
r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
if (r == NULL)
goto badmem;
dout(" rule %d is at %p\n", i, r);
c->rules[i] = r;
r->len = yes;
ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
for (j = 0; j < r->len; j++) {
r->steps[j].op = ceph_decode_32(p);
r->steps[j].arg1 = ceph_decode_32(p);
r->steps[j].arg2 = ceph_decode_32(p);
}
}
err = decode_crush_names(p, end, &c->type_names);
if (err)
goto fail;
err = decode_crush_names(p, end, &c->names);
if (err)
goto fail;
ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
/* tunables */
ceph_decode_need(p, end, 3*sizeof(u32), done);
c->choose_local_tries = ceph_decode_32(p);
c->choose_local_fallback_tries = ceph_decode_32(p);
c->choose_total_tries = ceph_decode_32(p);
dout("crush decode tunable choose_local_tries = %d\n",
c->choose_local_tries);
dout("crush decode tunable choose_local_fallback_tries = %d\n",
c->choose_local_fallback_tries);
dout("crush decode tunable choose_total_tries = %d\n",
c->choose_total_tries);
ceph_decode_need(p, end, sizeof(u32), done);
c->chooseleaf_descend_once = ceph_decode_32(p);
dout("crush decode tunable chooseleaf_descend_once = %d\n",
c->chooseleaf_descend_once);
ceph_decode_need(p, end, sizeof(u8), done);
c->chooseleaf_vary_r = ceph_decode_8(p);
dout("crush decode tunable chooseleaf_vary_r = %d\n",
c->chooseleaf_vary_r);
/* skip straw_calc_version, allowed_bucket_algs */
ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
*p += sizeof(u8) + sizeof(u32);
ceph_decode_need(p, end, sizeof(u8), done);
c->chooseleaf_stable = ceph_decode_8(p);
dout("crush decode tunable chooseleaf_stable = %d\n",
c->chooseleaf_stable);
if (*p != end) {
/* class_map */
ceph_decode_skip_map(p, end, 32, 32, bad);
/* class_name */
ceph_decode_skip_map(p, end, 32, string, bad);
/* class_bucket */
ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
}
if (*p != end) {
err = decode_choose_args(p, end, c);
if (err)
goto fail;
}
done:
crush_finalize(c);
dout("crush_decode success\n");
return c;
badmem:
err = -ENOMEM;
fail:
dout("crush_decode fail %d\n", err);
crush_destroy(c);
return ERR_PTR(err);
bad:
err = -EINVAL;
goto fail;
}
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
if (lhs->pool < rhs->pool)
return -1;
if (lhs->pool > rhs->pool)
return 1;
if (lhs->seed < rhs->seed)
return -1;
if (lhs->seed > rhs->seed)
return 1;
return 0;
}
int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
int ret;
ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
if (ret)
return ret;
if (lhs->shard < rhs->shard)
return -1;
if (lhs->shard > rhs->shard)
return 1;
return 0;
}
static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
struct ceph_pg_mapping *pg;
pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
if (!pg)
return NULL;
RB_CLEAR_NODE(&pg->node);
return pg;
}
static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
WARN_ON(!RB_EMPTY_NODE(&pg->node));
kfree(pg);
}
/*
* rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
* to a set of osds) and primary_temp (explicit primary setting)
*/
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
RB_BYPTR, const struct ceph_pg *, node)
/*
* rbtree of pg pool info
*/
DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)
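/*
 * The DEFINE_RB_FUNCS*() macros (from the libceph headers) expand into
 * the lookup_*, insert_*/__insert_* and erase_* helpers used throughout
 * this file, e.g. lookup_pg_pool()/erase_pg_pool() keyed by pool id and
 * lookup_pg_mapping()/insert_pg_mapping() keyed by struct ceph_pg.
 */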
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
return lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
struct ceph_pg_pool_info *pi;
if (id == CEPH_NOPOOL)
return NULL;
if (WARN_ON_ONCE(id > (u64) INT_MAX))
return NULL;
pi = lookup_pg_pool(&map->pg_pools, id);
return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
struct rb_node *rbp;
for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
struct ceph_pg_pool_info *pi =
rb_entry(rbp, struct ceph_pg_pool_info, node);
if (pi->name && strcmp(pi->name, name) == 0)
return pi->id;
}
return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
struct ceph_pg_pool_info *pi;
pi = lookup_pg_pool(&map->pg_pools, id);
return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
erase_pg_pool(root, pi);
kfree(pi->name);
kfree(pi);
}
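/*
 * A pool is encoded as a small header (encoding version, compat
 * version, byte length) followed by version-gated fields.  Fields this
 * client does not need are skipped by advancing *p, and any trailing
 * fields from newer encodings are ignored wholesale by jumping to
 * pool_end, which keeps decode_pool() forward compatible.
 */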
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
u8 ev, cv;
unsigned len, num;
void *pool_end;
ceph_decode_need(p, end, 2 + 4, bad);
ev = ceph_decode_8(p); /* encoding version */
cv = ceph_decode_8(p); /* compat version */
if (ev < 5) {
pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
return -EINVAL;
}
if (cv > 9) {
pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
return -EINVAL;
}
len = ceph_decode_32(p);
ceph_decode_need(p, end, len, bad);
pool_end = *p + len;
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
*p += 4 + 4; /* skip lpg_num, lpgp_num */
*p += 4; /* skip last_change */
*p += 8 + 4; /* skip snap_seq, snap_epoch */
/* skip snaps */
num = ceph_decode_32(p);
while (num--) {
*p += 8; /* snapid key */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
*p += len;
}
/* skip removed_snaps */
num = ceph_decode_32(p);
*p += num * (8 + 8);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
pi->min_size = ceph_decode_8(p);
else
pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
*p += 8 + 8; /* skip quota_max_* */
if (ev >= 9) {
/* skip tiers */
num = ceph_decode_32(p);
*p += num * 8;
*p += 8; /* skip tier_of */
*p += 1; /* skip cache_mode */
pi->read_tier = ceph_decode_64(p);
pi->write_tier = ceph_decode_64(p);
} else {
pi->read_tier = -1;
pi->write_tier = -1;
}
if (ev >= 10) {
/* skip properties */
num = ceph_decode_32(p);
while (num--) {
len = ceph_decode_32(p);
*p += len; /* key */
len = ceph_decode_32(p);
*p += len; /* val */
}
}
if (ev >= 11) {
/* skip hit_set_params */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
*p += len;
*p += 4; /* skip hit_set_period */
*p += 4; /* skip hit_set_count */
}
if (ev >= 12)
*p += 4; /* skip stripe_width */
if (ev >= 13) {
*p += 8; /* skip target_max_bytes */
*p += 8; /* skip target_max_objects */
*p += 4; /* skip cache_target_dirty_ratio_micro */
*p += 4; /* skip cache_target_full_ratio_micro */
*p += 4; /* skip cache_min_flush_age */
*p += 4; /* skip cache_min_evict_age */
}
if (ev >= 14) {
/* skip erasure_code_profile */
len = ceph_decode_32(p);
*p += len;
}
/*
* last_force_op_resend_preluminous, will be overridden if the
* map was encoded with RESEND_ON_SPLIT
*/
if (ev >= 15)
pi->last_force_request_resend = ceph_decode_32(p);
else
pi->last_force_request_resend = 0;
if (ev >= 16)
*p += 4; /* skip min_read_recency_for_promote */
if (ev >= 17)
*p += 8; /* skip expected_num_objects */
if (ev >= 19)
*p += 4; /* skip cache_target_dirty_high_ratio_micro */
if (ev >= 20)
*p += 4; /* skip min_write_recency_for_promote */
if (ev >= 21)
*p += 1; /* skip use_gmt_hitset */
if (ev >= 22)
*p += 1; /* skip fast_read */
if (ev >= 23) {
*p += 4; /* skip hit_set_grade_decay_rate */
*p += 4; /* skip hit_set_search_last_n */
}
if (ev >= 24) {
/* skip opts */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
*p += len;
}
if (ev >= 25)
pi->last_force_request_resend = ceph_decode_32(p);
/* ignore the rest */
*p = pool_end;
calc_pg_masks(pi);
return 0;
bad:
return -EINVAL;
}
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
struct ceph_pg_pool_info *pi;
u32 num, len;
u64 pool;
ceph_decode_32_safe(p, end, num, bad);
dout(" %d pool names\n", num);
while (num--) {
ceph_decode_64_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %llu len %d\n", pool, len);
ceph_decode_need(p, end, len, bad);
pi = lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
char *name = kstrndup(*p, len, GFP_NOFS);
if (!name)
return -ENOMEM;
kfree(pi->name);
pi->name = name;
dout(" name is %s\n", pi->name);
}
*p += len;
}
return 0;
bad:
return -EINVAL;
}
/*
* CRUSH workspaces
*
* workspace_manager framework borrowed from fs/btrfs/compression.c.
* Two simplifications: there is only one type of workspace and there
* is always at least one workspace.
*/
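/*
 * The usual pattern in the mapping code below (see do_crush()) is:
 *
 *	work = get_workspace(&map->crush_wsm, map->crush);
 *	crush_do_rule(map->crush, ..., work, ...);
 *	put_workspace(&map->crush_wsm, work);
 *
 * get_workspace() never fails: if no idle workspace is available and a
 * new one can't be allocated, it sleeps until one is put back.
 */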
static struct crush_work *alloc_workspace(const struct crush_map *c)
{
struct crush_work *work;
size_t work_size;
WARN_ON(!c->working_size);
work_size = crush_work_size(c, CEPH_PG_MAX_SIZE);
dout("%s work_size %zu bytes\n", __func__, work_size);
work = kvmalloc(work_size, GFP_NOIO);
if (!work)
return NULL;
INIT_LIST_HEAD(&work->item);
crush_init_workspace(c, work);
return work;
}
static void free_workspace(struct crush_work *work)
{
WARN_ON(!list_empty(&work->item));
kvfree(work);
}
static void init_workspace_manager(struct workspace_manager *wsm)
{
INIT_LIST_HEAD(&wsm->idle_ws);
spin_lock_init(&wsm->ws_lock);
atomic_set(&wsm->total_ws, 0);
wsm->free_ws = 0;
init_waitqueue_head(&wsm->ws_wait);
}
static void add_initial_workspace(struct workspace_manager *wsm,
struct crush_work *work)
{
WARN_ON(!list_empty(&wsm->idle_ws));
list_add(&work->item, &wsm->idle_ws);
atomic_set(&wsm->total_ws, 1);
wsm->free_ws = 1;
}
static void cleanup_workspace_manager(struct workspace_manager *wsm)
{
struct crush_work *work;
while (!list_empty(&wsm->idle_ws)) {
work = list_first_entry(&wsm->idle_ws, struct crush_work,
item);
list_del_init(&work->item);
free_workspace(work);
}
atomic_set(&wsm->total_ws, 0);
wsm->free_ws = 0;
}
/*
* Finds an available workspace or allocates a new one. If it's not
* possible to allocate a new one, waits until there is one.
*/
static struct crush_work *get_workspace(struct workspace_manager *wsm,
const struct crush_map *c)
{
struct crush_work *work;
int cpus = num_online_cpus();
again:
spin_lock(&wsm->ws_lock);
if (!list_empty(&wsm->idle_ws)) {
work = list_first_entry(&wsm->idle_ws, struct crush_work,
item);
list_del_init(&work->item);
wsm->free_ws--;
spin_unlock(&wsm->ws_lock);
return work;
}
if (atomic_read(&wsm->total_ws) > cpus) {
DEFINE_WAIT(wait);
spin_unlock(&wsm->ws_lock);
prepare_to_wait(&wsm->ws_wait, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_read(&wsm->total_ws) > cpus && !wsm->free_ws)
schedule();
finish_wait(&wsm->ws_wait, &wait);
goto again;
}
atomic_inc(&wsm->total_ws);
spin_unlock(&wsm->ws_lock);
work = alloc_workspace(c);
if (!work) {
atomic_dec(&wsm->total_ws);
wake_up(&wsm->ws_wait);
/*
* Do not return the error but go back to waiting. We
* have the initial workspace and the CRUSH computation
* time is bounded so we will get it eventually.
*/
WARN_ON(atomic_read(&wsm->total_ws) < 1);
goto again;
}
return work;
}
/*
* Puts a workspace back on the list or frees it if we have enough
* idle ones sitting around.
*/
static void put_workspace(struct workspace_manager *wsm,
struct crush_work *work)
{
spin_lock(&wsm->ws_lock);
if (wsm->free_ws <= num_online_cpus()) {
list_add(&work->item, &wsm->idle_ws);
wsm->free_ws++;
spin_unlock(&wsm->ws_lock);
goto wake;
}
spin_unlock(&wsm->ws_lock);
free_workspace(work);
atomic_dec(&wsm->total_ws);
wake:
if (wq_has_sleeper(&wsm->ws_wait))
wake_up(&wsm->ws_wait);
}
/*
* osd map
*/
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
struct ceph_osdmap *map;
map = kzalloc(sizeof(*map), GFP_NOIO);
if (!map)
return NULL;
map->pg_pools = RB_ROOT;
map->pool_max = -1;
map->pg_temp = RB_ROOT;
map->primary_temp = RB_ROOT;
map->pg_upmap = RB_ROOT;
map->pg_upmap_items = RB_ROOT;
init_workspace_manager(&map->crush_wsm);
return map;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
dout("osdmap_destroy %p\n", map);
if (map->crush)
crush_destroy(map->crush);
cleanup_workspace_manager(&map->crush_wsm);
while (!RB_EMPTY_ROOT(&map->pg_temp)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->pg_temp),
struct ceph_pg_mapping, node);
erase_pg_mapping(&map->pg_temp, pg);
free_pg_mapping(pg);
}
while (!RB_EMPTY_ROOT(&map->primary_temp)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->primary_temp),
struct ceph_pg_mapping, node);
erase_pg_mapping(&map->primary_temp, pg);
free_pg_mapping(pg);
}
while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->pg_upmap),
struct ceph_pg_mapping, node);
rb_erase(&pg->node, &map->pg_upmap);
kfree(pg);
}
while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->pg_upmap_items),
struct ceph_pg_mapping, node);
rb_erase(&pg->node, &map->pg_upmap_items);
kfree(pg);
}
while (!RB_EMPTY_ROOT(&map->pg_pools)) {
struct ceph_pg_pool_info *pi =
rb_entry(rb_first(&map->pg_pools),
struct ceph_pg_pool_info, node);
__remove_pg_pool(&map->pg_pools, pi);
}
kvfree(map->osd_state);
kvfree(map->osd_weight);
kvfree(map->osd_addr);
kvfree(map->osd_primary_affinity);
kfree(map);
}
/*
* Adjust max_osd value, (re)allocate arrays.
*
* The new elements are properly initialized.
*/
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
u32 *state;
u32 *weight;
struct ceph_entity_addr *addr;
u32 to_copy;
int i;
dout("%s old %u new %u\n", __func__, map->max_osd, max);
if (max == map->max_osd)
return 0;
state = kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
weight = kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
addr = kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
if (!state || !weight || !addr) {
kvfree(state);
kvfree(weight);
kvfree(addr);
return -ENOMEM;
}
to_copy = min(map->max_osd, max);
if (map->osd_state) {
memcpy(state, map->osd_state, to_copy * sizeof(*state));
memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
kvfree(map->osd_state);
kvfree(map->osd_weight);
kvfree(map->osd_addr);
}
map->osd_state = state;
map->osd_weight = weight;
map->osd_addr = addr;
for (i = map->max_osd; i < max; i++) {
map->osd_state[i] = 0;
map->osd_weight[i] = CEPH_OSD_OUT;
memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
}
if (map->osd_primary_affinity) {
u32 *affinity;
affinity = kvmalloc(array_size(max, sizeof(*affinity)),
GFP_NOFS);
if (!affinity)
return -ENOMEM;
memcpy(affinity, map->osd_primary_affinity,
to_copy * sizeof(*affinity));
kvfree(map->osd_primary_affinity);
map->osd_primary_affinity = affinity;
for (i = map->max_osd; i < max; i++)
map->osd_primary_affinity[i] =
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
}
map->max_osd = max;
return 0;
}
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
struct crush_work *work;
if (IS_ERR(crush))
return PTR_ERR(crush);
work = alloc_workspace(crush);
if (!work) {
crush_destroy(crush);
return -ENOMEM;
}
if (map->crush)
crush_destroy(map->crush);
cleanup_workspace_manager(&map->crush_wsm);
map->crush = crush;
add_initial_workspace(&map->crush_wsm, work);
return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER 7
#define OSDMAP_CLIENT_DATA_COMPAT_VER 1
/*
* Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps,
* to struct_v of the client_data section for new (v7 and above)
* osdmaps.
*/
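/*
 * Wire layout, for reference:
 *
 *   v7+:  u8 struct_v, u8 struct_compat, u32 struct_len   (wrapper)
 *         u8 struct_v, u8 struct_compat, u32 struct_len   (client data)
 *         <client-usable osdmap data>
 *   v6-:  u16 version, <old-style osdmap data>
 *
 * The two struct_len values are skipped rather than used for bounds
 * checking; the individual decoders check against @end instead.
 */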
static int get_osdmap_client_data_v(void **p, void *end,
const char *prefix, u8 *v)
{
u8 struct_v;
ceph_decode_8_safe(p, end, struct_v, e_inval);
if (struct_v >= 7) {
u8 struct_compat;
ceph_decode_8_safe(p, end, struct_compat, e_inval);
if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
struct_v, struct_compat,
OSDMAP_WRAPPER_COMPAT_VER, prefix);
return -EINVAL;
}
*p += 4; /* ignore wrapper struct_len */
ceph_decode_8_safe(p, end, struct_v, e_inval);
ceph_decode_8_safe(p, end, struct_compat, e_inval);
if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
struct_v, struct_compat,
OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
return -EINVAL;
}
*p += 4; /* ignore client data struct_len */
} else {
u16 version;
*p -= 1;
ceph_decode_16_safe(p, end, version, e_inval);
if (version < 6) {
pr_warn("got v %d < 6 of %s ceph_osdmap\n",
version, prefix);
return -EINVAL;
}
/* old osdmap encoding */
struct_v = 0;
}
*v = struct_v;
return 0;
e_inval:
return -EINVAL;
}
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
bool incremental)
{
u32 n;
ceph_decode_32_safe(p, end, n, e_inval);
while (n--) {
struct ceph_pg_pool_info *pi;
u64 pool;
int ret;
ceph_decode_64_safe(p, end, pool, e_inval);
pi = lookup_pg_pool(&map->pg_pools, pool);
if (!incremental || !pi) {
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
return -ENOMEM;
RB_CLEAR_NODE(&pi->node);
pi->id = pool;
if (!__insert_pg_pool(&map->pg_pools, pi)) {
kfree(pi);
return -EEXIST;
}
}
ret = decode_pool(p, end, pi);
if (ret)
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
return __decode_pools(p, end, map, false);
}
static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
return __decode_pools(p, end, map, true);
}
typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);
static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
decode_mapping_fn_t fn, bool incremental)
{
u32 n;
WARN_ON(!incremental && !fn);
ceph_decode_32_safe(p, end, n, e_inval);
while (n--) {
struct ceph_pg_mapping *pg;
struct ceph_pg pgid;
int ret;
ret = ceph_decode_pgid(p, end, &pgid);
if (ret)
return ret;
pg = lookup_pg_mapping(mapping_root, &pgid);
if (pg) {
WARN_ON(!incremental);
erase_pg_mapping(mapping_root, pg);
free_pg_mapping(pg);
}
if (fn) {
pg = fn(p, end, incremental);
if (IS_ERR(pg))
return PTR_ERR(pg);
if (pg) {
pg->pgid = pgid; /* struct */
insert_pg_mapping(mapping_root, pg);
}
}
}
return 0;
e_inval:
return -EINVAL;
}
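/*
 * decode_pg_mapping() implements the common "map of pgid -> value"
 * decode: any existing entry for the pgid is dropped first, then @fn
 * (if any) decodes the new value.  A NULL @fn is used for the
 * old_pg_upmap[_items] lists, which only name pgids to remove.  @fn
 * returning NULL (e.g. an empty new_pg_temp vector or a -1
 * new_primary_temp osd) likewise means "remove the mapping".
 */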
static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
bool incremental)
{
struct ceph_pg_mapping *pg;
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0 && incremental)
return NULL; /* new_pg_temp: [] to remove */
if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, len * sizeof(u32), e_inval);
pg = alloc_pg_mapping(len * sizeof(u32));
if (!pg)
return ERR_PTR(-ENOMEM);
pg->pg_temp.len = len;
for (i = 0; i < len; i++)
pg->pg_temp.osds[i] = ceph_decode_32(p);
return pg;
e_inval:
return ERR_PTR(-EINVAL);
}
static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
false);
}
static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
true);
}
static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
bool incremental)
{
struct ceph_pg_mapping *pg;
u32 osd;
ceph_decode_32_safe(p, end, osd, e_inval);
if (osd == (u32)-1 && incremental)
return NULL; /* new_primary_temp: -1 to remove */
pg = alloc_pg_mapping(0);
if (!pg)
return ERR_PTR(-ENOMEM);
pg->primary_temp.osd = osd;
return pg;
e_inval:
return ERR_PTR(-EINVAL);
}
static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->primary_temp,
__decode_primary_temp, false);
}
static int decode_new_primary_temp(void **p, void *end,
struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->primary_temp,
__decode_primary_temp, true);
}
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
BUG_ON(osd >= map->max_osd);
if (!map->osd_primary_affinity)
return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
return map->osd_primary_affinity[osd];
}
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
BUG_ON(osd >= map->max_osd);
if (!map->osd_primary_affinity) {
int i;
map->osd_primary_affinity = kvmalloc(
array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
GFP_NOFS);
if (!map->osd_primary_affinity)
return -ENOMEM;
for (i = 0; i < map->max_osd; i++)
map->osd_primary_affinity[i] =
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
}
map->osd_primary_affinity[osd] = aff;
return 0;
}
static int decode_primary_affinity(void **p, void *end,
struct ceph_osdmap *map)
{
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0) {
kvfree(map->osd_primary_affinity);
map->osd_primary_affinity = NULL;
return 0;
}
if (len != map->max_osd)
goto e_inval;
ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
for (i = 0; i < map->max_osd; i++) {
int ret;
ret = set_primary_affinity(map, i, ceph_decode_32(p));
if (ret)
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static int decode_new_primary_affinity(void **p, void *end,
struct ceph_osdmap *map)
{
u32 n;
ceph_decode_32_safe(p, end, n, e_inval);
while (n--) {
u32 osd, aff;
int ret;
ceph_decode_32_safe(p, end, osd, e_inval);
ceph_decode_32_safe(p, end, aff, e_inval);
ret = set_primary_affinity(map, osd, aff);
if (ret)
return ret;
osdmap_info(map, "osd%d primary-affinity 0x%x\n", osd, aff);
}
return 0;
e_inval:
return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
bool __unused)
{
return __decode_pg_temp(p, end, false);
}
static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
false);
}
static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
true);
}
static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}
static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
bool __unused)
{
struct ceph_pg_mapping *pg;
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
pg = alloc_pg_mapping(2 * len * sizeof(u32));
if (!pg)
return ERR_PTR(-ENOMEM);
pg->pg_upmap_items.len = len;
for (i = 0; i < len; i++) {
pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
}
return pg;
e_inval:
return ERR_PTR(-EINVAL);
}
static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap_items,
__decode_pg_upmap_items, false);
}
static int decode_new_pg_upmap_items(void **p, void *end,
struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap_items,
__decode_pg_upmap_items, true);
}
static int decode_old_pg_upmap_items(void **p, void *end,
struct ceph_osdmap *map)
{
return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
/*
* decode a full map.
*/
static int osdmap_decode(void **p, void *end, bool msgr2,
struct ceph_osdmap *map)
{
u8 struct_v;
u32 epoch = 0;
void *start = *p;
u32 max;
u32 len, i;
int err;
dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
err = get_osdmap_client_data_v(p, end, "full", &struct_v);
if (err)
goto bad;
/* fsid, epoch, created, modified */
ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
sizeof(map->created) + sizeof(map->modified), e_inval);
ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
epoch = map->epoch = ceph_decode_32(p);
ceph_decode_copy(p, &map->created, sizeof(map->created));
ceph_decode_copy(p, &map->modified, sizeof(map->modified));
/* pools */
err = decode_pools(p, end, map);
if (err)
goto bad;
/* pool_name */
err = decode_pool_names(p, end, map);
if (err)
goto bad;
ceph_decode_32_safe(p, end, map->pool_max, e_inval);
ceph_decode_32_safe(p, end, map->flags, e_inval);
/* max_osd */
ceph_decode_32_safe(p, end, max, e_inval);
/* (re)alloc osd arrays */
err = osdmap_set_max_osd(map, max);
if (err)
goto bad;
/* osd_state, osd_weight, osd_addrs->client_addr */
ceph_decode_need(p, end, 3*sizeof(u32) +
map->max_osd*(struct_v >= 5 ? sizeof(u32) :
sizeof(u8)) +
sizeof(*map->osd_weight), e_inval);
if (ceph_decode_32(p) != map->max_osd)
goto e_inval;
if (struct_v >= 5) {
for (i = 0; i < map->max_osd; i++)
map->osd_state[i] = ceph_decode_32(p);
} else {
for (i = 0; i < map->max_osd; i++)
map->osd_state[i] = ceph_decode_8(p);
}
if (ceph_decode_32(p) != map->max_osd)
goto e_inval;
for (i = 0; i < map->max_osd; i++)
map->osd_weight[i] = ceph_decode_32(p);
if (ceph_decode_32(p) != map->max_osd)
goto e_inval;
for (i = 0; i < map->max_osd; i++) {
struct ceph_entity_addr *addr = &map->osd_addr[i];
if (struct_v >= 8)
err = ceph_decode_entity_addrvec(p, end, msgr2, addr);
else
err = ceph_decode_entity_addr(p, end, addr);
if (err)
goto bad;
dout("%s osd%d addr %s\n", __func__, i, ceph_pr_addr(addr));
}
/* pg_temp */
err = decode_pg_temp(p, end, map);
if (err)
goto bad;
/* primary_temp */
if (struct_v >= 1) {
err = decode_primary_temp(p, end, map);
if (err)
goto bad;
}
/* primary_affinity */
if (struct_v >= 2) {
err = decode_primary_affinity(p, end, map);
if (err)
goto bad;
} else {
WARN_ON(map->osd_primary_affinity);
}
/* crush */
ceph_decode_32_safe(p, end, len, e_inval);
err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
if (err)
goto bad;
*p += len;
if (struct_v >= 3) {
/* erasure_code_profiles */
ceph_decode_skip_map_of_map(p, end, string, string, string,
e_inval);
}
if (struct_v >= 4) {
err = decode_pg_upmap(p, end, map);
if (err)
goto bad;
err = decode_pg_upmap_items(p, end, map);
if (err)
goto bad;
} else {
WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
}
/* ignore the rest */
*p = end;
dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
return 0;
e_inval:
err = -EINVAL;
bad:
pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
err, epoch, (int)(*p - start), *p, start, end);
print_hex_dump(KERN_DEBUG, "osdmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
return err;
}
/*
* Allocate and decode a full map.
*/
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2)
{
struct ceph_osdmap *map;
int ret;
map = ceph_osdmap_alloc();
if (!map)
return ERR_PTR(-ENOMEM);
ret = osdmap_decode(p, end, msgr2, map);
if (ret) {
ceph_osdmap_destroy(map);
return ERR_PTR(ret);
}
return map;
}
/*
* Encoding order is (new_up_client, new_state, new_weight). Need to
* apply in the (new_weight, new_state, new_up_client) order, because
* an incremental map may look like e.g.
*
* new_up_client: { osd=6, addr=... } # set osd_state and addr
* new_state: { osd=6, xorstate=EXISTS } # clear osd_state
*/
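/*
 * To get that ordering, the decoder below makes three passes over the
 * same region of the buffer: it skips over new_up_client and new_state
 * to find and apply new_weight first, then rewinds to apply new_state,
 * then rewinds again to apply new_up_client, and finally leaves *p
 * just past the new_weight section.
 */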
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
bool msgr2, struct ceph_osdmap *map)
{
void *new_up_client;
void *new_state;
void *new_weight_end;
u32 len;
int ret;
int i;
new_up_client = *p;
ceph_decode_32_safe(p, end, len, e_inval);
for (i = 0; i < len; ++i) {
struct ceph_entity_addr addr;
ceph_decode_skip_32(p, end, e_inval);
if (struct_v >= 7)
ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
else
ret = ceph_decode_entity_addr(p, end, &addr);
if (ret)
return ret;
}
new_state = *p;
ceph_decode_32_safe(p, end, len, e_inval);
len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
ceph_decode_need(p, end, len, e_inval);
*p += len;
/* new_weight */
ceph_decode_32_safe(p, end, len, e_inval);
while (len--) {
s32 osd;
u32 w;
ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
osd = ceph_decode_32(p);
w = ceph_decode_32(p);
BUG_ON(osd >= map->max_osd);
osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
w == CEPH_OSD_IN ? "(in)" :
(w == CEPH_OSD_OUT ? "(out)" : ""));
map->osd_weight[osd] = w;
/*
* If we are marking in, set the EXISTS, and clear the
* AUTOOUT and NEW bits.
*/
if (w) {
map->osd_state[osd] |= CEPH_OSD_EXISTS;
map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
CEPH_OSD_NEW);
}
}
new_weight_end = *p;
/* new_state (up/down) */
*p = new_state;
len = ceph_decode_32(p);
while (len--) {
s32 osd;
u32 xorstate;
osd = ceph_decode_32(p);
if (struct_v >= 5)
xorstate = ceph_decode_32(p);
else
xorstate = ceph_decode_8(p);
if (xorstate == 0)
xorstate = CEPH_OSD_UP;
BUG_ON(osd >= map->max_osd);
if ((map->osd_state[osd] & CEPH_OSD_UP) &&
(xorstate & CEPH_OSD_UP))
osdmap_info(map, "osd%d down\n", osd);
if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
(xorstate & CEPH_OSD_EXISTS)) {
osdmap_info(map, "osd%d does not exist\n", osd);
ret = set_primary_affinity(map, osd,
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
if (ret)
return ret;
memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
map->osd_state[osd] = 0;
} else {
map->osd_state[osd] ^= xorstate;
}
}
/* new_up_client */
*p = new_up_client;
len = ceph_decode_32(p);
while (len--) {
s32 osd;
struct ceph_entity_addr addr;
osd = ceph_decode_32(p);
BUG_ON(osd >= map->max_osd);
if (struct_v >= 7)
ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
else
ret = ceph_decode_entity_addr(p, end, &addr);
if (ret)
return ret;
dout("%s osd%d addr %s\n", __func__, osd, ceph_pr_addr(&addr));
osdmap_info(map, "osd%d up\n", osd);
map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
map->osd_addr[osd] = addr;
}
*p = new_weight_end;
return 0;
e_inval:
return -EINVAL;
}
/*
* decode and apply an incremental map update.
*/
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
struct ceph_osdmap *map)
{
struct ceph_fsid fsid;
u32 epoch = 0;
struct ceph_timespec modified;
s32 len;
u64 pool;
__s64 new_pool_max;
__s32 new_flags, max;
void *start = *p;
int err;
u8 struct_v;
dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
if (err)
goto bad;
/* fsid, epoch, modified, new_pool_max, new_flags */
ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
sizeof(u64) + sizeof(u32), e_inval);
ceph_decode_copy(p, &fsid, sizeof(fsid));
epoch = ceph_decode_32(p);
BUG_ON(epoch != map->epoch+1);
ceph_decode_copy(p, &modified, sizeof(modified));
new_pool_max = ceph_decode_64(p);
new_flags = ceph_decode_32(p);
/* full map? */
ceph_decode_32_safe(p, end, len, e_inval);
if (len > 0) {
dout("apply_incremental full map len %d, %p to %p\n",
len, *p, end);
return ceph_osdmap_decode(p, min(*p+len, end), msgr2);
}
/* new crush? */
ceph_decode_32_safe(p, end, len, e_inval);
if (len > 0) {
err = osdmap_set_crush(map,
crush_decode(*p, min(*p + len, end)));
if (err)
goto bad;
*p += len;
}
/* new flags? */
if (new_flags >= 0)
map->flags = new_flags;
if (new_pool_max >= 0)
map->pool_max = new_pool_max;
/* new max? */
ceph_decode_32_safe(p, end, max, e_inval);
if (max >= 0) {
err = osdmap_set_max_osd(map, max);
if (err)
goto bad;
}
map->epoch++;
map->modified = modified;
/* new_pools */
err = decode_new_pools(p, end, map);
if (err)
goto bad;
/* new_pool_names */
err = decode_pool_names(p, end, map);
if (err)
goto bad;
/* old_pool */
ceph_decode_32_safe(p, end, len, e_inval);
while (len--) {
struct ceph_pg_pool_info *pi;
ceph_decode_64_safe(p, end, pool, e_inval);
pi = lookup_pg_pool(&map->pg_pools, pool);
if (pi)
__remove_pg_pool(&map->pg_pools, pi);
}
/* new_up_client, new_state, new_weight */
err = decode_new_up_state_weight(p, end, struct_v, msgr2, map);
if (err)
goto bad;
/* new_pg_temp */
err = decode_new_pg_temp(p, end, map);
if (err)
goto bad;
/* new_primary_temp */
if (struct_v >= 1) {
err = decode_new_primary_temp(p, end, map);
if (err)
goto bad;
}
/* new_primary_affinity */
if (struct_v >= 2) {
err = decode_new_primary_affinity(p, end, map);
if (err)
goto bad;
}
if (struct_v >= 3) {
/* new_erasure_code_profiles */
ceph_decode_skip_map_of_map(p, end, string, string, string,
e_inval);
/* old_erasure_code_profiles */
ceph_decode_skip_set(p, end, string, e_inval);
}
if (struct_v >= 4) {
err = decode_new_pg_upmap(p, end, map);
if (err)
goto bad;
err = decode_old_pg_upmap(p, end, map);
if (err)
goto bad;
err = decode_new_pg_upmap_items(p, end, map);
if (err)
goto bad;
err = decode_old_pg_upmap_items(p, end, map);
if (err)
goto bad;
}
/* ignore the rest */
*p = end;
dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
return map;
e_inval:
err = -EINVAL;
bad:
pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
err, epoch, (int)(*p - start), *p, start, end);
print_hex_dump(KERN_DEBUG, "osdmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
return ERR_PTR(err);
}
void ceph_oloc_copy(struct ceph_object_locator *dest,
const struct ceph_object_locator *src)
{
ceph_oloc_destroy(dest);
dest->pool = src->pool;
if (src->pool_ns)
dest->pool_ns = ceph_get_string(src->pool_ns);
else
dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);
void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);
void ceph_oid_copy(struct ceph_object_id *dest,
const struct ceph_object_id *src)
{
ceph_oid_destroy(dest);
if (src->name != src->inline_name) {
/* very rare, see ceph_object_id definition */
dest->name = kmalloc(src->name_len + 1,
GFP_NOIO | __GFP_NOFAIL);
} else {
dest->name = dest->inline_name;
}
memcpy(dest->name, src->name, src->name_len + 1);
dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
int len;
WARN_ON(!ceph_oid_empty(oid));
len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
if (len >= sizeof(oid->inline_name))
return len;
oid->name_len = len;
return 0;
}
/*
* If oid doesn't fit into inline buffer, BUG.
*/
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
BUG_ON(oid_printf_vargs(oid, fmt, ap));
va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);
static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, va_list ap)
{
va_list aq;
int len;
va_copy(aq, ap);
len = oid_printf_vargs(oid, fmt, aq);
va_end(aq);
if (len) {
char *external_name;
external_name = kmalloc(len + 1, gfp);
if (!external_name)
return -ENOMEM;
oid->name = external_name;
WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
oid->name_len = len;
}
return 0;
}
/*
* If oid doesn't fit into inline buffer, allocate.
*/
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
va_end(ap);
return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);
void ceph_oid_destroy(struct ceph_object_id *oid)
{
if (oid->name != oid->inline_name)
kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);
/*
* osds only
*/
static bool __osds_equal(const struct ceph_osds *lhs,
const struct ceph_osds *rhs)
{
if (lhs->size == rhs->size &&
!memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
return true;
return false;
}
/*
* osds + primary
*/
static bool osds_equal(const struct ceph_osds *lhs,
const struct ceph_osds *rhs)
{
if (__osds_equal(lhs, rhs) &&
lhs->primary == rhs->primary)
return true;
return false;
}
static bool osds_valid(const struct ceph_osds *set)
{
/* non-empty set */
if (set->size > 0 && set->primary >= 0)
return true;
/* empty can_shift_osds set */
if (!set->size && set->primary == -1)
return true;
/* empty !can_shift_osds set - all NONE */
if (set->size > 0 && set->primary == -1) {
int i;
for (i = 0; i < set->size; i++) {
if (set->osds[i] != CRUSH_ITEM_NONE)
break;
}
if (i == set->size)
return true;
}
return false;
}
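/*
 * "can_shift_osds" distinguishes the two layouts above: pools whose
 * OSD sets are positionless (in practice replicated pools) may be
 * compacted, while pools that must preserve positions (erasure-coded
 * pools) mark holes with CRUSH_ITEM_NONE instead.  See
 * ceph_can_shift_osds().
 */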
void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
dest->size = src->size;
dest->primary = src->primary;
}
bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
u32 new_pg_num)
{
int old_bits = calc_bits_of(old_pg_num);
int old_mask = (1 << old_bits) - 1;
int n;
WARN_ON(pgid->seed >= old_pg_num);
if (new_pg_num <= old_pg_num)
return false;
for (n = 1; ; n++) {
int next_bit = n << (old_bits - 1);
u32 s = next_bit | pgid->seed;
if (s < old_pg_num || s == pgid->seed)
continue;
if (s >= new_pg_num)
break;
s = ceph_stable_mod(s, old_pg_num, old_mask);
if (s == pgid->seed)
return true;
}
return false;
}
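/*
 * Example: with old_pg_num = 4 (old_bits = 3, old_mask = 7) and
 * new_pg_num = 8, seed 1 gains a child at seed 5 (4 | 1): 5 is a valid
 * new seed (< 8) and ceph_stable_mod(5, 4, 7) folds back to 1, so
 * ceph_pg_is_split() returns true for seed 1.
 */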
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
const struct ceph_osds *old_up,
const struct ceph_osds *new_up,
int old_size,
int new_size,
int old_min_size,
int new_min_size,
u32 old_pg_num,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
bool old_recovery_deletes,
bool new_recovery_deletes,
const struct ceph_pg *pgid)
{
return !osds_equal(old_acting, new_acting) ||
!osds_equal(old_up, new_up) ||
old_size != new_size ||
old_min_size != new_min_size ||
ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
old_sort_bitwise != new_sort_bitwise ||
old_recovery_deletes != new_recovery_deletes;
}
static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
int i;
for (i = 0; i < acting->size; i++) {
if (acting->osds[i] == osd)
return i;
}
return -1;
}
static bool primary_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting)
{
if (!old_acting->size && !new_acting->size)
return false; /* both still empty */
if (!old_acting->size ^ !new_acting->size)
return true; /* was empty, now not, or vice versa */
if (old_acting->primary != new_acting->primary)
return true; /* primary changed */
if (calc_pg_rank(old_acting->primary, old_acting) !=
calc_pg_rank(new_acting->primary, new_acting))
return true;
return false; /* same primary (tho replicas may have changed) */
}
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change)
{
if (primary_changed(old_acting, new_acting))
return true;
if (any_change && !__osds_equal(old_acting, new_acting))
return true;
return false;
}
/*
* Map an object into a PG.
*
* Should only be called with target_oid and target_oloc (as opposed to
* base_oid and base_oloc), since tiering isn't taken into account.
*/
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
struct ceph_pg *raw_pgid)
{
WARN_ON(pi->id != oloc->pool);
if (!oloc->pool_ns) {
raw_pgid->pool = oloc->pool;
raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
oid->name_len);
dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
raw_pgid->pool, raw_pgid->seed);
} else {
char stack_buf[256];
char *buf = stack_buf;
int nsl = oloc->pool_ns->len;
size_t total = nsl + 1 + oid->name_len;
if (total > sizeof(stack_buf))
buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
memcpy(buf, oloc->pool_ns->str, nsl);
buf[nsl] = '\037';
memcpy(buf + nsl + 1, oid->name, oid->name_len);
raw_pgid->pool = oloc->pool;
raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
if (buf != stack_buf)
kfree(buf);
dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
oid->name, nsl, oloc->pool_ns->str,
raw_pgid->pool, raw_pgid->seed);
}
}
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
struct ceph_pg *raw_pgid)
{
struct ceph_pg_pool_info *pi;
pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
if (!pi)
return -ENOENT;
__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);
/*
* Map a raw PG (full precision ps) into an actual PG.
*/
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_pg *pgid)
{
pgid->pool = raw_pgid->pool;
pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
pi->pg_num_mask);
}
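/*
 * ceph_stable_mod() keeps seeds that are already below pg_num stable
 * as pg_num grows: e.g. with pg_num = 12 (pg_num_mask = 15, assuming
 * the usual (1 << bits_of(pg_num - 1)) - 1 mask set up by
 * calc_pg_masks()), raw seed 9 stays 9 while raw seed 13 folds down to
 * 13 & 7 = 5.
 */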
/*
* Map a raw PG (full precision ps) into a placement ps (placement
* seed). Include pool id in that value so that different pools don't
* use the same seeds.
*/
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid)
{
if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
return crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(raw_pgid->seed,
pi->pgp_num,
pi->pgp_num_mask),
raw_pgid->pool);
} else {
/*
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 ==
* 2.3 == ...
*/
return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
pi->pgp_num_mask) +
(unsigned)raw_pgid->pool;
}
}
/*
* Magic value used for a "default" fallback choose_args, used if the
* crush_choose_arg_map passed to do_crush() does not exist. If this
* also doesn't exist, fall back to canonical weights.
*/
#define CEPH_DEFAULT_CHOOSE_ARGS -1
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
int *result, int result_max,
const __u32 *weight, int weight_max,
s64 choose_args_index)
{
struct crush_choose_arg_map *arg_map;
struct crush_work *work;
int r;
BUG_ON(result_max > CEPH_PG_MAX_SIZE);
arg_map = lookup_choose_arg_map(&map->crush->choose_args,
choose_args_index);
if (!arg_map)
arg_map = lookup_choose_arg_map(&map->crush->choose_args,
CEPH_DEFAULT_CHOOSE_ARGS);
work = get_workspace(&map->crush_wsm, map->crush);
r = crush_do_rule(map->crush, ruleno, x, result, result_max,
weight, weight_max, work,
arg_map ? arg_map->args : NULL);
put_workspace(&map->crush_wsm, work);
return r;
}
static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
struct ceph_osds *set)
{
int i;
if (ceph_can_shift_osds(pi)) {
int removed = 0;
/* shift left */
for (i = 0; i < set->size; i++) {
if (!ceph_osd_exists(osdmap, set->osds[i])) {
removed++;
continue;
}
if (removed)
set->osds[i - removed] = set->osds[i];
}
set->size -= removed;
} else {
/* set nonexistent (dne) devices to NONE */
for (i = 0; i < set->size; i++) {
if (!ceph_osd_exists(osdmap, set->osds[i]))
set->osds[i] = CRUSH_ITEM_NONE;
}
}
}
/*
* Calculate raw set (CRUSH output) for given PG and filter out
* nonexistent OSDs. ->primary is undefined for a raw set.
*
* Placement seed (CRUSH input) is returned through @ppps.
*/
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_osds *raw,
u32 *ppps)
{
u32 pps = raw_pg_to_pps(pi, raw_pgid);
int ruleno;
int len;
ceph_osds_init(raw);
if (ppps)
*ppps = pps;
ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
pi->size);
if (ruleno < 0) {
pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
pi->id, pi->crush_ruleset, pi->type, pi->size);
return;
}
if (pi->size > ARRAY_SIZE(raw->osds)) {
pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
pi->id, pi->crush_ruleset, pi->type, pi->size,
ARRAY_SIZE(raw->osds));
return;
}
len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
osdmap->osd_weight, osdmap->max_osd, pi->id);
if (len < 0) {
pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
len, ruleno, pi->id, pi->crush_ruleset, pi->type,
pi->size);
return;
}
raw->size = len;
remove_nonexistent_osds(osdmap, pi, raw);
}
/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
const struct ceph_pg *pgid,
struct ceph_osds *raw)
{
struct ceph_pg_mapping *pg;
int i, j;
pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
if (pg) {
/* make sure targets aren't marked out */
for (i = 0; i < pg->pg_upmap.len; i++) {
int osd = pg->pg_upmap.osds[i];
if (osd != CRUSH_ITEM_NONE &&
osd < osdmap->max_osd &&
osdmap->osd_weight[osd] == 0) {
/* reject/ignore explicit mapping */
return;
}
}
for (i = 0; i < pg->pg_upmap.len; i++)
raw->osds[i] = pg->pg_upmap.osds[i];
raw->size = pg->pg_upmap.len;
/* check and apply pg_upmap_items, if any */
}
pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
if (pg) {
/*
* Note: this approach does not allow a bidirectional swap,
* e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
*/
for (i = 0; i < pg->pg_upmap_items.len; i++) {
int from = pg->pg_upmap_items.from_to[i][0];
int to = pg->pg_upmap_items.from_to[i][1];
int pos = -1;
bool exists = false;
/* make sure replacement doesn't already appear */
for (j = 0; j < raw->size; j++) {
int osd = raw->osds[j];
if (osd == to) {
exists = true;
break;
}
/* ignore mapping if target is marked out */
if (osd == from && pos < 0 &&
!(to != CRUSH_ITEM_NONE &&
to < osdmap->max_osd &&
osdmap->osd_weight[to] == 0)) {
pos = j;
}
}
if (!exists && pos >= 0)
raw->osds[pos] = to;
}
}
}
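/*
 * Example: a raw set [0, 1, 2] with pg_upmap_items [[1, 7]] becomes
 * [0, 7, 2], provided osd7 is not already in the set; an item whose
 * target exists but is marked out (weight 0) is ignored.
 */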
/*
* Given raw set, calculate up set and up primary. By definition of an
* up set, the result won't contain nonexistent or down OSDs.
*
* This is done in-place - on return @set is the up set. If it's
* empty, ->primary will remain undefined.
*/
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
struct ceph_osds *set)
{
int i;
/* ->primary is undefined for a raw set */
BUG_ON(set->primary != -1);
if (ceph_can_shift_osds(pi)) {
int removed = 0;
/* shift left */
for (i = 0; i < set->size; i++) {
if (ceph_osd_is_down(osdmap, set->osds[i])) {
removed++;
continue;
}
if (removed)
set->osds[i - removed] = set->osds[i];
}
set->size -= removed;
if (set->size > 0)
set->primary = set->osds[0];
} else {
/* set down/nonexistent (dne) devices to NONE */
for (i = set->size - 1; i >= 0; i--) {
if (ceph_osd_is_down(osdmap, set->osds[i]))
set->osds[i] = CRUSH_ITEM_NONE;
else
set->primary = set->osds[i];
}
}
}
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
u32 pps,
struct ceph_osds *up)
{
int i;
int pos = -1;
/*
* Do we have any non-default primary_affinity values for these
* osds?
*/
if (!osdmap->osd_primary_affinity)
return;
for (i = 0; i < up->size; i++) {
int osd = up->osds[i];
if (osd != CRUSH_ITEM_NONE &&
osdmap->osd_primary_affinity[osd] !=
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
break;
}
}
if (i == up->size)
return;
/*
* Pick the primary. Feed both the seed (for the pg) and the
* osd into the hash/rng so that a proportional fraction of an
* osd's pgs get rejected as primary.
*/
for (i = 0; i < up->size; i++) {
int osd = up->osds[i];
u32 aff;
if (osd == CRUSH_ITEM_NONE)
continue;
aff = osdmap->osd_primary_affinity[osd];
if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
(crush_hash32_2(CRUSH_HASH_RJENKINS1,
pps, osd) >> 16) >= aff) {
/*
* We chose not to use this primary. Note it
* anyway as a fallback in case we don't pick
* anyone else, but keep looking.
*/
if (pos < 0)
pos = i;
} else {
pos = i;
break;
}
}
if (pos < 0)
return;
up->primary = up->osds[pos];
if (ceph_can_shift_osds(pi) && pos > 0) {
/* move the new primary to the front */
for (i = pos; i > 0; i--)
up->osds[i] = up->osds[i - 1];
up->osds[0] = up->primary;
}
}
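/*
 * The rejection test above is probabilistic: (hash >> 16) is roughly
 * uniform in [0, 0xffff], so an osd with primary affinity aff is
 * passed over as primary with probability about 1 - aff / 0x10000,
 * e.g. roughly half the time for aff = 0x8000 (assuming
 * CEPH_OSD_MAX_PRIMARY_AFFINITY == 0x10000).
 */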
/*
* Get pg_temp and primary_temp mappings for given PG.
*
* Note that a PG may have none, only pg_temp, only primary_temp or
* both pg_temp and primary_temp mappings. This means @temp isn't
* always a valid OSD set on return: in the "only primary_temp" case,
* @temp will have its ->primary >= 0 but ->size == 0.
*/
static void get_temp_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *pgid,
struct ceph_osds *temp)
{
struct ceph_pg_mapping *pg;
int i;
ceph_osds_init(temp);
/* pg_temp? */
pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
for (i = 0; i < pg->pg_temp.len; i++) {
if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
if (ceph_can_shift_osds(pi))
continue;
temp->osds[temp->size++] = CRUSH_ITEM_NONE;
} else {
temp->osds[temp->size++] = pg->pg_temp.osds[i];
}
}
/* apply pg_temp's primary */
for (i = 0; i < temp->size; i++) {
if (temp->osds[i] != CRUSH_ITEM_NONE) {
temp->primary = temp->osds[i];
break;
}
}
}
/* primary_temp? */
pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
if (pg)
temp->primary = pg->primary_temp.osd;
}
/*
* Map a PG to its acting set as well as its up set.
*
* Acting set is used for data mapping purposes, while up set can be
* recorded for detecting interval changes and deciding whether to
* resend a request.
*/
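/*
 * The pipeline is: raw CRUSH output (pg_to_raw_osds), explicit
 * pg_upmap/pg_upmap_items overrides (apply_upmap), filtering of down
 * OSDs and primary selection (raw_to_up_osds, apply_primary_affinity)
 * to produce the up set, and finally pg_temp/primary_temp overrides
 * (get_temp_osds) to produce the acting set, which falls back to the
 * up set when there is no temp mapping.
 */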
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_osds *up,
struct ceph_osds *acting)
{
struct ceph_pg pgid;
u32 pps;
WARN_ON(pi->id != raw_pgid->pool);
raw_pg_to_pg(pi, raw_pgid, &pgid);
pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
apply_upmap(osdmap, &pgid, up);
raw_to_up_osds(osdmap, pi, up);
apply_primary_affinity(osdmap, pi, pps, up);
get_temp_osds(osdmap, pi, &pgid, acting);
if (!acting->size) {
memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
acting->size = up->size;
if (acting->primary == -1)
acting->primary = up->primary;
}
WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_spg *spgid)
{
struct ceph_pg pgid;
struct ceph_osds up, acting;
int i;
WARN_ON(pi->id != raw_pgid->pool);
raw_pg_to_pg(pi, raw_pgid, &pgid);
if (ceph_can_shift_osds(pi)) {
spgid->pgid = pgid; /* struct */
spgid->shard = CEPH_SPG_NOSHARD;
return true;
}
ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
for (i = 0; i < acting.size; i++) {
if (acting.osds[i] == acting.primary) {
spgid->pgid = pgid; /* struct */
spgid->shard = i;
return true;
}
}
return false;
}
/*
* Return acting primary for given PG, or -1 if none.
*/
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid)
{
struct ceph_pg_pool_info *pi;
struct ceph_osds up, acting;
pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
if (!pi)
return -1;
ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
static struct crush_loc_node *alloc_crush_loc(size_t type_name_len,
size_t name_len)
{
struct crush_loc_node *loc;
loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO);
if (!loc)
return NULL;
RB_CLEAR_NODE(&loc->cl_node);
return loc;
}
static void free_crush_loc(struct crush_loc_node *loc)
{
WARN_ON(!RB_EMPTY_NODE(&loc->cl_node));
kfree(loc);
}
static int crush_loc_compare(const struct crush_loc *loc1,
const struct crush_loc *loc2)
{
return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?:
strcmp(loc1->cl_name, loc2->cl_name);
}
DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare,
RB_BYPTR, const struct crush_loc *, cl_node)
/*
* Parses a set of <bucket type name>':'<bucket name> pairs separated
* by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar".
*
* Note that @crush_location is modified by strsep().
*/
int ceph_parse_crush_location(char *crush_location, struct rb_root *locs)
{
struct crush_loc_node *loc;
const char *type_name, *name, *colon;
size_t type_name_len, name_len;
dout("%s '%s'\n", __func__, crush_location);
while ((type_name = strsep(&crush_location, "|"))) {
colon = strchr(type_name, ':');
if (!colon)
return -EINVAL;
type_name_len = colon - type_name;
if (type_name_len == 0)
return -EINVAL;
name = colon + 1;
name_len = strlen(name);
if (name_len == 0)
return -EINVAL;
loc = alloc_crush_loc(type_name_len, name_len);
if (!loc)
return -ENOMEM;
loc->cl_loc.cl_type_name = loc->cl_data;
memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len);
loc->cl_loc.cl_type_name[type_name_len] = '\0';
loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1;
memcpy(loc->cl_loc.cl_name, name, name_len);
loc->cl_loc.cl_name[name_len] = '\0';
if (!__insert_crush_loc(locs, loc)) {
free_crush_loc(loc);
return -EEXIST;
}
dout("%s type_name '%s' name '%s'\n", __func__,
loc->cl_loc.cl_type_name, loc->cl_loc.cl_name);
}
return 0;
}
int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2)
{
struct rb_node *n1 = rb_first(locs1);
struct rb_node *n2 = rb_first(locs2);
int ret;
for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) {
struct crush_loc_node *loc1 =
rb_entry(n1, struct crush_loc_node, cl_node);
struct crush_loc_node *loc2 =
rb_entry(n2, struct crush_loc_node, cl_node);
ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc);
if (ret)
return ret;
}
if (!n1 && n2)
return -1;
if (n1 && !n2)
return 1;
return 0;
}
void ceph_clear_crush_locs(struct rb_root *locs)
{
while (!RB_EMPTY_ROOT(locs)) {
struct crush_loc_node *loc =
rb_entry(rb_first(locs), struct crush_loc_node, cl_node);
erase_crush_loc(locs, loc);
free_crush_loc(loc);
}
}
/*
* [a-zA-Z0-9-_.]+
*/
static bool is_valid_crush_name(const char *name)
{
do {
if (!('a' <= *name && *name <= 'z') &&
!('A' <= *name && *name <= 'Z') &&
!('0' <= *name && *name <= '9') &&
*name != '-' && *name != '_' && *name != '.')
return false;
} while (*++name != '\0');
return true;
}
/*
* Gets the parent of an item. Returns its id (<0 because the
* parent is always a bucket), type id (>0 for the same reason,
* via @parent_type_id) and location (via @parent_loc). If no
* parent, returns 0.
*
* Does a linear search, as there are no parent pointers of any
* kind. Note that the result is ambiguous for items that occur
* multiple times in the map.
*/
static int get_immediate_parent(struct crush_map *c, int id,
u16 *parent_type_id,
struct crush_loc *parent_loc)
{
struct crush_bucket *b;
struct crush_name_node *type_cn, *cn;
int i, j;
for (i = 0; i < c->max_buckets; i++) {
b = c->buckets[i];
if (!b)
continue;
/* ignore per-class shadow hierarchy */
cn = lookup_crush_name(&c->names, b->id);
if (!cn || !is_valid_crush_name(cn->cn_name))
continue;
for (j = 0; j < b->size; j++) {
if (b->items[j] != id)
continue;
*parent_type_id = b->type;
type_cn = lookup_crush_name(&c->type_names, b->type);
parent_loc->cl_type_name = type_cn->cn_name;
parent_loc->cl_name = cn->cn_name;
return b->id;
}
}
return 0; /* no parent */
}
/*
* Calculates the locality/distance from an item to a client
* location expressed in terms of CRUSH hierarchy as a set of
* (bucket type name, bucket name) pairs. Specifically, looks
* for the lowest-valued bucket type for which the location of
* @id matches one of the locations in @locs, so for standard
* bucket types (host = 1, rack = 3, datacenter = 8, zone = 9)
* a matching host is closer than a matching rack and a matching
* data center is closer than a matching zone.
*
* Specifying multiple locations (a "multipath" location) such
* as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs
* is a multimap. The locality will be:
*
* - 3 for OSDs in racks foo1 and foo2
* - 8 for OSDs in data center bar
* - -1 for all other OSDs
*
* The lowest possible bucket type is 1, so the best locality
* for an OSD is 1 (i.e. a matching host). Locality 0 would be
* the OSD itself.
*/
int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
struct rb_root *locs)
{
struct crush_loc loc;
u16 type_id;
/*
* Instead of repeated get_immediate_parent() calls,
* the location of @id could be obtained with a single
* depth-first traversal.
*/
for (;;) {
id = get_immediate_parent(osdmap->crush, id, &type_id, &loc);
if (id >= 0)
return -1; /* not local */
if (lookup_crush_loc(locs, &loc))
return type_id;
}
}
| linux-master | net/ceph/osdmap.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include "auth_none.h"
#include "auth_x.h"
/*
* get protocol handler
*/
static u32 supported_protocols[] = {
CEPH_AUTH_NONE,
CEPH_AUTH_CEPHX
};
static int init_protocol(struct ceph_auth_client *ac, int proto)
{
dout("%s proto %d\n", __func__, proto);
switch (proto) {
case CEPH_AUTH_NONE:
return ceph_auth_none_init(ac);
case CEPH_AUTH_CEPHX:
return ceph_x_init(ac);
default:
pr_err("bad auth protocol %d\n", proto);
return -EINVAL;
}
}
void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
{
dout("%s global_id %llu\n", __func__, global_id);
if (!global_id)
pr_err("got zero global_id\n");
if (ac->global_id && global_id != ac->global_id)
pr_err("global_id changed from %llu to %llu\n", ac->global_id,
global_id);
ac->global_id = global_id;
}
/*
* setup, teardown.
*/
struct ceph_auth_client *ceph_auth_init(const char *name,
const struct ceph_crypto_key *key,
const int *con_modes)
{
struct ceph_auth_client *ac;
ac = kzalloc(sizeof(*ac), GFP_NOFS);
if (!ac)
return ERR_PTR(-ENOMEM);
mutex_init(&ac->mutex);
ac->negotiating = true;
if (name)
ac->name = name;
else
ac->name = CEPH_AUTH_NAME_DEFAULT;
ac->key = key;
ac->preferred_mode = con_modes[0];
ac->fallback_mode = con_modes[1];
dout("%s name '%s' preferred_mode %d fallback_mode %d\n", __func__,
ac->name, ac->preferred_mode, ac->fallback_mode);
return ac;
}
void ceph_auth_destroy(struct ceph_auth_client *ac)
{
dout("auth_destroy %p\n", ac);
if (ac->ops)
ac->ops->destroy(ac);
kfree(ac);
}
/*
* Reset occurs when reconnecting to the monitor.
*/
void ceph_auth_reset(struct ceph_auth_client *ac)
{
mutex_lock(&ac->mutex);
dout("auth_reset %p\n", ac);
if (ac->ops && !ac->negotiating)
ac->ops->reset(ac);
ac->negotiating = true;
mutex_unlock(&ac->mutex);
}
/*
* EntityName, not to be confused with entity_name_t
*/
int ceph_auth_entity_name_encode(const char *name, void **p, void *end)
{
int len = strlen(name);
if (*p + 2*sizeof(u32) + len > end)
return -ERANGE;
ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);
ceph_encode_32(p, len);
ceph_encode_copy(p, name, len);
return 0;
}
/*
* Initiate protocol negotiation with monitor. Include entity name
* and list supported protocols.
*/
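/*
 * Resulting message layout:
 *
 *   struct ceph_mon_request_header
 *   u32 protocol = CEPH_AUTH_UNKNOWN
 *   u32 payload length (backfilled via @lenp)
 *   u8 (1), u32 num, u32 supported_protocols[num]
 *   EntityName (u32 type, u32 len, name bytes)
 *   u64 global_id
 *
 * Note that ceph_decode_need() is used here purely as a "do we have
 * room left in the buffer" check before encoding.
 */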
int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
{
struct ceph_mon_request_header *monhdr = buf;
void *p = monhdr + 1, *end = buf + len, *lenp;
int i, num;
int ret;
mutex_lock(&ac->mutex);
dout("auth_build_hello\n");
monhdr->have_version = 0;
monhdr->session_mon = cpu_to_le16(-1);
monhdr->session_mon_tid = 0;
ceph_encode_32(&p, CEPH_AUTH_UNKNOWN); /* no protocol, yet */
lenp = p;
p += sizeof(u32);
ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
ceph_encode_8(&p, 1);
num = ARRAY_SIZE(supported_protocols);
ceph_encode_32(&p, num);
ceph_decode_need(&p, end, num * sizeof(u32), bad);
for (i = 0; i < num; i++)
ceph_encode_32(&p, supported_protocols[i]);
ret = ceph_auth_entity_name_encode(ac->name, &p, end);
if (ret < 0)
goto out;
ceph_decode_need(&p, end, sizeof(u64), bad);
ceph_encode_64(&p, ac->global_id);
ceph_encode_32(&lenp, p - lenp - sizeof(u32));
ret = p - buf;
out:
mutex_unlock(&ac->mutex);
return ret;
bad:
ret = -ERANGE;
goto out;
}
static int build_request(struct ceph_auth_client *ac, bool add_header,
void *buf, int buf_len)
{
void *end = buf + buf_len;
void *p;
int ret;
p = buf;
if (add_header) {
/* struct ceph_mon_request_header + protocol */
ceph_encode_64_safe(&p, end, 0, e_range);
ceph_encode_16_safe(&p, end, -1, e_range);
ceph_encode_64_safe(&p, end, 0, e_range);
ceph_encode_32_safe(&p, end, ac->protocol, e_range);
}
ceph_encode_need(&p, end, sizeof(u32), e_range);
ret = ac->ops->build_request(ac, p + sizeof(u32), end);
if (ret < 0) {
pr_err("auth protocol '%s' building request failed: %d\n",
ceph_auth_proto_name(ac->protocol), ret);
return ret;
}
dout(" built request %d bytes\n", ret);
ceph_encode_32(&p, ret);
return p + ret - buf;
e_range:
return -ERANGE;
}
/*
* Handle auth message from monitor.
*/
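/*
 * Reply layout, as decoded below:
 *
 *   u32 protocol, s32 result, u64 global_id,
 *   u32 payload_len, payload bytes,
 *   u32 result_msg_len, result_msg bytes
 *
 * During negotiation the protocol handler is (re)created to match the
 * protocol chosen by the monitor; the payload is then handed to
 * ->handle_reply(), and -EAGAIN from it means another round is needed,
 * in which case the next request is built into @reply_buf.
 */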
int ceph_handle_auth_reply(struct ceph_auth_client *ac,
void *buf, size_t len,
void *reply_buf, size_t reply_len)
{
void *p = buf;
void *end = buf + len;
int protocol;
s32 result;
u64 global_id;
void *payload, *payload_end;
int payload_len;
char *result_msg;
int result_msg_len;
int ret = -EINVAL;
mutex_lock(&ac->mutex);
dout("handle_auth_reply %p %p\n", p, end);
ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
protocol = ceph_decode_32(&p);
result = ceph_decode_32(&p);
global_id = ceph_decode_64(&p);
payload_len = ceph_decode_32(&p);
payload = p;
p += payload_len;
ceph_decode_need(&p, end, sizeof(u32), bad);
result_msg_len = ceph_decode_32(&p);
result_msg = p;
p += result_msg_len;
if (p != end)
goto bad;
dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len,
result_msg, global_id, payload_len);
payload_end = payload + payload_len;
if (ac->negotiating) {
/* server does not support our protocols? */
if (!protocol && result < 0) {
ret = result;
goto out;
}
/* set up (new) protocol handler? */
if (ac->protocol && ac->protocol != protocol) {
ac->ops->destroy(ac);
ac->protocol = 0;
ac->ops = NULL;
}
if (ac->protocol != protocol) {
ret = init_protocol(ac, protocol);
if (ret) {
pr_err("auth protocol '%s' init failed: %d\n",
ceph_auth_proto_name(protocol), ret);
goto out;
}
}
ac->negotiating = false;
}
if (result) {
pr_err("auth protocol '%s' mauth authentication failed: %d\n",
ceph_auth_proto_name(ac->protocol), result);
ret = result;
goto out;
}
ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
NULL, NULL, NULL, NULL);
if (ret == -EAGAIN) {
ret = build_request(ac, true, reply_buf, reply_len);
goto out;
} else if (ret) {
goto out;
}
out:
mutex_unlock(&ac->mutex);
return ret;
bad:
pr_err("failed to decode auth msg\n");
ret = -EINVAL;
goto out;
}
int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len)
{
int ret = 0;
mutex_lock(&ac->mutex);
if (ac->ops->should_authenticate(ac))
ret = build_request(ac, true, msg_buf, msg_len);
mutex_unlock(&ac->mutex);
return ret;
}
int ceph_auth_is_authenticated(struct ceph_auth_client *ac)
{
int ret = 0;
mutex_lock(&ac->mutex);
if (ac->ops)
ret = ac->ops->is_authenticated(ac);
mutex_unlock(&ac->mutex);
return ret;
}
EXPORT_SYMBOL(ceph_auth_is_authenticated);
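/*
* Create (or refresh) an authorizer for the given service type and report
* the auth protocol and, optionally, the acceptable connection modes.
*/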
int __ceph_auth_get_authorizer(struct ceph_auth_client *ac,
struct ceph_auth_handshake *auth,
int peer_type, bool force_new,
int *proto, int *pref_mode, int *fallb_mode)
{
int ret;
mutex_lock(&ac->mutex);
if (force_new && auth->authorizer) {
ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer)
ret = ac->ops->create_authorizer(ac, peer_type, auth);
else if (ac->ops->update_authorizer)
ret = ac->ops->update_authorizer(ac, peer_type, auth);
else
ret = 0;
if (ret)
goto out;
*proto = ac->protocol;
if (pref_mode && fallb_mode) {
*pref_mode = ac->preferred_mode;
*fallb_mode = ac->fallback_mode;
}
out:
mutex_unlock(&ac->mutex);
return ret;
}
EXPORT_SYMBOL(__ceph_auth_get_authorizer);
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
a->destroy(a);
}
EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *challenge_buf,
int challenge_buf_len)
{
int ret = 0;
mutex_lock(&ac->mutex);
if (ac->ops && ac->ops->add_authorizer_challenge)
ret = ac->ops->add_authorizer_challenge(ac, a, challenge_buf,
challenge_buf_len);
mutex_unlock(&ac->mutex);
return ret;
}
EXPORT_SYMBOL(ceph_auth_add_authorizer_challenge);
int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
int ret = 0;
mutex_lock(&ac->mutex);
if (ac->ops && ac->ops->verify_authorizer_reply)
ret = ac->ops->verify_authorizer_reply(ac, a,
reply, reply_len, session_key, session_key_len,
con_secret, con_secret_len);
mutex_unlock(&ac->mutex);
return ret;
}
EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply);
void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type)
{
mutex_lock(&ac->mutex);
if (ac->ops && ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, peer_type);
mutex_unlock(&ac->mutex);
}
EXPORT_SYMBOL(ceph_auth_invalidate_authorizer);
/*
* msgr2 authentication
*/
static bool contains(const int *arr, int cnt, int val)
{
int i;
for (i = 0; i < cnt; i++) {
if (arr[i] == val)
return true;
}
return false;
}
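/*
* Encode the list of acceptable connection modes: the preferred mode,
* optionally followed by the fallback mode.
*/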
static int encode_con_modes(void **p, void *end, int pref_mode, int fallb_mode)
{
WARN_ON(pref_mode == CEPH_CON_MODE_UNKNOWN);
if (fallb_mode != CEPH_CON_MODE_UNKNOWN) {
ceph_encode_32_safe(p, end, 2, e_range);
ceph_encode_32_safe(p, end, pref_mode, e_range);
ceph_encode_32_safe(p, end, fallb_mode, e_range);
} else {
ceph_encode_32_safe(p, end, 1, e_range);
ceph_encode_32_safe(p, end, pref_mode, e_range);
}
return 0;
e_range:
return -ERANGE;
}
/*
* Similar to ceph_auth_build_hello().
*/
int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len)
{
int proto = ac->key ? CEPH_AUTH_CEPHX : CEPH_AUTH_NONE;
void *end = buf + buf_len;
void *lenp;
void *p;
int ret;
mutex_lock(&ac->mutex);
if (ac->protocol == CEPH_AUTH_UNKNOWN) {
ret = init_protocol(ac, proto);
if (ret) {
pr_err("auth protocol '%s' init failed: %d\n",
ceph_auth_proto_name(proto), ret);
goto out;
}
} else {
WARN_ON(ac->protocol != proto);
ac->ops->reset(ac);
}
p = buf;
ceph_encode_32_safe(&p, end, ac->protocol, e_range);
ret = encode_con_modes(&p, end, ac->preferred_mode, ac->fallback_mode);
if (ret)
goto out;
lenp = p;
p += 4; /* space for len */
ceph_encode_8_safe(&p, end, CEPH_AUTH_MODE_MON, e_range);
ret = ceph_auth_entity_name_encode(ac->name, &p, end);
if (ret)
goto out;
ceph_encode_64_safe(&p, end, ac->global_id, e_range);
ceph_encode_32(&lenp, p - lenp - 4);
ret = p - buf;
out:
mutex_unlock(&ac->mutex);
return ret;
e_range:
ret = -ERANGE;
goto out;
}
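/*
* Handle an intermediate (auth more) reply and build the next request in
* the exchange.
*/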
int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply,
int reply_len, void *buf, int buf_len)
{
int ret;
mutex_lock(&ac->mutex);
ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
NULL, NULL, NULL, NULL);
if (ret == -EAGAIN)
ret = build_request(ac, false, buf, buf_len);
else
WARN_ON(ret >= 0);
mutex_unlock(&ac->mutex);
return ret;
}
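/*
* Handle the final auth reply, extracting the session key and connection
* secret for the msgr2 session.
*/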
int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
u64 global_id, void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
int ret;
mutex_lock(&ac->mutex);
ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
WARN_ON(ret == -EAGAIN || ret > 0);
mutex_unlock(&ac->mutex);
return ret;
}
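/*
* The monitor rejected our auth attempt. Return false if our auth
* protocol or connection modes aren't allowed by the server, true for
* other failures.
*/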
bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac,
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt)
{
mutex_lock(&ac->mutex);
WARN_ON(used_proto != ac->protocol);
if (result == -EOPNOTSUPP) {
if (!contains(allowed_protos, proto_cnt, ac->protocol)) {
pr_err("auth protocol '%s' not allowed\n",
ceph_auth_proto_name(ac->protocol));
goto not_allowed;
}
if (!contains(allowed_modes, mode_cnt, ac->preferred_mode) &&
(ac->fallback_mode == CEPH_CON_MODE_UNKNOWN ||
!contains(allowed_modes, mode_cnt, ac->fallback_mode))) {
pr_err("preferred mode '%s' not allowed\n",
ceph_con_mode_name(ac->preferred_mode));
if (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN)
pr_err("no fallback mode\n");
else
pr_err("fallback mode '%s' not allowed\n",
ceph_con_mode_name(ac->fallback_mode));
goto not_allowed;
}
}
WARN_ON(result == -EOPNOTSUPP || result >= 0);
pr_err("auth protocol '%s' msgr authentication failed: %d\n",
ceph_auth_proto_name(ac->protocol), result);
mutex_unlock(&ac->mutex);
return true;
not_allowed:
mutex_unlock(&ac->mutex);
return false;
}
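/*
* msgr2 wrapper: get an authorizer and encode the auth_request preamble
* (protocol, connection modes, authorizer payload length) into @buf.
*/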
int ceph_auth_get_authorizer(struct ceph_auth_client *ac,
struct ceph_auth_handshake *auth,
int peer_type, void *buf, int *buf_len)
{
void *end = buf + *buf_len;
int pref_mode, fallb_mode;
int proto;
void *p;
int ret;
ret = __ceph_auth_get_authorizer(ac, auth, peer_type, true, &proto,
&pref_mode, &fallb_mode);
if (ret)
return ret;
p = buf;
ceph_encode_32_safe(&p, end, proto, e_range);
ret = encode_con_modes(&p, end, pref_mode, fallb_mode);
if (ret)
return ret;
ceph_encode_32_safe(&p, end, auth->authorizer_buf_len, e_range);
*buf_len = p - buf;
return 0;
e_range:
return -ERANGE;
}
EXPORT_SYMBOL(ceph_auth_get_authorizer);
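/*
* The service sent a challenge -- fold it into the authorizer and
* re-encode the authorizer payload length.
*/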
int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac,
struct ceph_auth_handshake *auth,
void *reply, int reply_len,
void *buf, int *buf_len)
{
void *end = buf + *buf_len;
void *p;
int ret;
ret = ceph_auth_add_authorizer_challenge(ac, auth->authorizer,
reply, reply_len);
if (ret)
return ret;
p = buf;
ceph_encode_32_safe(&p, end, auth->authorizer_buf_len, e_range);
*buf_len = p - buf;
return 0;
e_range:
return -ERANGE;
}
EXPORT_SYMBOL(ceph_auth_handle_svc_reply_more);
int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac,
struct ceph_auth_handshake *auth,
void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
reply, reply_len, session_key, session_key_len,
con_secret, con_secret_len);
}
EXPORT_SYMBOL(ceph_auth_handle_svc_reply_done);
bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac,
int peer_type, int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt)
{
mutex_lock(&ac->mutex);
WARN_ON(used_proto != ac->protocol);
if (result == -EOPNOTSUPP) {
if (!contains(allowed_protos, proto_cnt, ac->protocol)) {
pr_err("auth protocol '%s' not allowed by %s\n",
ceph_auth_proto_name(ac->protocol),
ceph_entity_type_name(peer_type));
goto not_allowed;
}
if (!contains(allowed_modes, mode_cnt, ac->preferred_mode) &&
(ac->fallback_mode == CEPH_CON_MODE_UNKNOWN ||
!contains(allowed_modes, mode_cnt, ac->fallback_mode))) {
pr_err("preferred mode '%s' not allowed by %s\n",
ceph_con_mode_name(ac->preferred_mode),
ceph_entity_type_name(peer_type));
if (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN)
pr_err("no fallback mode\n");
else
pr_err("fallback mode '%s' not allowed by %s\n",
ceph_con_mode_name(ac->fallback_mode),
ceph_entity_type_name(peer_type));
goto not_allowed;
}
}
WARN_ON(result == -EOPNOTSUPP || result >= 0);
pr_err("auth protocol '%s' authorization to %s failed: %d\n",
ceph_auth_proto_name(ac->protocol),
ceph_entity_type_name(peer_type), result);
if (ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, peer_type);
mutex_unlock(&ac->mutex);
return true;
not_allowed:
mutex_unlock(&ac->mutex);
return false;
}
EXPORT_SYMBOL(ceph_auth_handle_bad_authorizer);
| linux-master | net/ceph/auth.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
#ifdef CONFIG_DEBUG_FS
/*
* Implement the /sys/kernel/debug/ceph debugfs hierarchy
*
* /sys/kernel/debug/ceph/client* - an instance of the ceph client
* .../osdmap - current osdmap
* .../monmap - current monmap
* .../osdc - active osd requests
* .../monc - mon client state
* .../client_options - libceph-only (i.e. not rbd or cephfs) options
* .../dentry_lru - dump contents of dentry lru
* .../caps - expose cap (reservation) stats
* .../bdi - symlink to ../../bdi/something
*/
static struct dentry *ceph_debugfs_dir;
static int monmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_client *client = s->private;
if (client->monc.monmap == NULL)
return 0;
seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
for (i = 0; i < client->monc.monmap->num_mon; i++) {
struct ceph_entity_inst *inst =
&client->monc.monmap->mon_inst[i];
seq_printf(s, "\t%s%lld\t%s\n",
ENTITY_NAME(inst->name),
ceph_pr_addr(&inst->addr));
}
return 0;
}
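/*
* osdmap: pools, per-OSD state and the pg mapping overrides (pg_temp,
* primary_temp, pg_upmap, pg_upmap_items).
*/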
static int osdmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_client *client = s->private;
struct ceph_osd_client *osdc = &client->osdc;
struct ceph_osdmap *map = osdc->osdmap;
struct rb_node *n;
if (map == NULL)
return 0;
down_read(&osdc->lock);
seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch,
osdc->epoch_barrier, map->flags);
for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
seq_printf(s, "pool %lld '%s' type %d size %d min_size %d pg_num %u pg_num_mask %d flags 0x%llx lfor %u read_tier %lld write_tier %lld\n",
pi->id, pi->name, pi->type, pi->size, pi->min_size,
pi->pg_num, pi->pg_num_mask, pi->flags,
pi->last_force_request_resend, pi->read_tier,
pi->write_tier);
}
for (i = 0; i < map->max_osd; i++) {
struct ceph_entity_addr *addr = &map->osd_addr[i];
u32 state = map->osd_state[i];
char sb[64];
seq_printf(s, "osd%d\t%s\t%3d%%\t(%s)\t%3d%%\t%2d\n",
i, ceph_pr_addr(addr),
((map->osd_weight[i]*100) >> 16),
ceph_osdmap_state_str(sb, sizeof(sb), state),
((ceph_get_primary_affinity(map, i)*100) >> 16),
ceph_get_crush_locality(map, i,
&client->options->crush_locs));
}
for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {
struct ceph_pg_mapping *pg =
rb_entry(n, struct ceph_pg_mapping, node);
seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,
pg->pgid.seed);
for (i = 0; i < pg->pg_temp.len; i++)
seq_printf(s, "%s%d", (i == 0 ? "" : ","),
pg->pg_temp.osds[i]);
seq_printf(s, "]\n");
}
for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {
struct ceph_pg_mapping *pg =
rb_entry(n, struct ceph_pg_mapping, node);
seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,
pg->pgid.seed, pg->primary_temp.osd);
}
for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) {
struct ceph_pg_mapping *pg =
rb_entry(n, struct ceph_pg_mapping, node);
seq_printf(s, "pg_upmap %llu.%x [", pg->pgid.pool,
pg->pgid.seed);
for (i = 0; i < pg->pg_upmap.len; i++)
seq_printf(s, "%s%d", (i == 0 ? "" : ","),
pg->pg_upmap.osds[i]);
seq_printf(s, "]\n");
}
for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) {
struct ceph_pg_mapping *pg =
rb_entry(n, struct ceph_pg_mapping, node);
seq_printf(s, "pg_upmap_items %llu.%x [", pg->pgid.pool,
pg->pgid.seed);
for (i = 0; i < pg->pg_upmap_items.len; i++)
seq_printf(s, "%s%d->%d", (i == 0 ? "" : ","),
pg->pg_upmap_items.from_to[i][0],
pg->pg_upmap_items.from_to[i][1]);
seq_printf(s, "]\n");
}
up_read(&osdc->lock);
return 0;
}
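/*
* monc: subscription state and in-flight generic mon requests.
*/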
static int monc_show(struct seq_file *s, void *p)
{
struct ceph_client *client = s->private;
struct ceph_mon_generic_request *req;
struct ceph_mon_client *monc = &client->monc;
struct rb_node *rp;
int i;
mutex_lock(&monc->mutex);
for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
seq_printf(s, "have %s %u", ceph_sub_str[i],
monc->subs[i].have);
if (monc->subs[i].want)
seq_printf(s, " want %llu%s",
le64_to_cpu(monc->subs[i].item.start),
(monc->subs[i].item.flags &
CEPH_SUBSCRIBE_ONETIME ? "" : "+"));
seq_putc(s, '\n');
}
seq_printf(s, "fs_cluster_id %d\n", monc->fs_cluster_id);
for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
__u16 op;
req = rb_entry(rp, struct ceph_mon_generic_request, node);
op = le16_to_cpu(req->request->hdr.type);
if (op == CEPH_MSG_STATFS)
seq_printf(s, "%llu statfs\n", req->tid);
else if (op == CEPH_MSG_MON_GET_VERSION)
seq_printf(s, "%llu mon_get_version", req->tid);
else
seq_printf(s, "%llu unknown\n", req->tid);
}
mutex_unlock(&monc->mutex);
return 0;
}
static void dump_spgid(struct seq_file *s, const struct ceph_spg *spgid)
{
seq_printf(s, "%llu.%x", spgid->pgid.pool, spgid->pgid.seed);
if (spgid->shard != CEPH_SPG_NOSHARD)
seq_printf(s, "s%d", spgid->shard);
}
static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
{
int i;
seq_printf(s, "osd%d\t%llu.%x\t", t->osd, t->pgid.pool, t->pgid.seed);
dump_spgid(s, &t->spgid);
seq_puts(s, "\t[");
for (i = 0; i < t->up.size; i++)
seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
seq_printf(s, "]/%d\t[", t->up.primary);
for (i = 0; i < t->acting.size; i++)
seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
seq_printf(s, "]/%d\te%u\t", t->acting.primary, t->epoch);
if (t->target_oloc.pool_ns) {
seq_printf(s, "%*pE/%*pE\t0x%x",
(int)t->target_oloc.pool_ns->len,
t->target_oloc.pool_ns->str,
t->target_oid.name_len, t->target_oid.name, t->flags);
} else {
seq_printf(s, "%*pE\t0x%x", t->target_oid.name_len,
t->target_oid.name, t->flags);
}
if (t->paused)
seq_puts(s, "\tP");
}
static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
{
int i;
seq_printf(s, "%llu\t", req->r_tid);
dump_target(s, &req->r_t);
seq_printf(s, "\t%d", req->r_attempts);
for (i = 0; i < req->r_num_ops; i++) {
struct ceph_osd_req_op *op = &req->r_ops[i];
seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
ceph_osd_op_name(op->op));
if (op->op == CEPH_OSD_OP_WATCH)
seq_printf(s, "-%s",
ceph_osd_watch_op_name(op->watch.op));
else if (op->op == CEPH_OSD_OP_CALL)
seq_printf(s, "-%s/%s", op->cls.class_name,
op->cls.method_name);
}
seq_putc(s, '\n');
}
static void dump_requests(struct seq_file *s, struct ceph_osd *osd)
{
struct rb_node *n;
mutex_lock(&osd->lock);
for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
dump_request(s, req);
}
mutex_unlock(&osd->lock);
}
static void dump_linger_request(struct seq_file *s,
struct ceph_osd_linger_request *lreq)
{
seq_printf(s, "%llu\t", lreq->linger_id);
dump_target(s, &lreq->t);
seq_printf(s, "\t%u\t%s%s/%d\n", lreq->register_gen,
lreq->is_watch ? "W" : "N", lreq->committed ? "C" : "",
lreq->last_error);
}
static void dump_linger_requests(struct seq_file *s, struct ceph_osd *osd)
{
struct rb_node *n;
mutex_lock(&osd->lock);
for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
struct ceph_osd_linger_request *lreq =
rb_entry(n, struct ceph_osd_linger_request, node);
dump_linger_request(s, lreq);
}
mutex_unlock(&osd->lock);
}
static void dump_snapid(struct seq_file *s, u64 snapid)
{
if (snapid == CEPH_NOSNAP)
seq_puts(s, "head");
else if (snapid == CEPH_SNAPDIR)
seq_puts(s, "snapdir");
else
seq_printf(s, "%llx", snapid);
}
static void dump_name_escaped(struct seq_file *s, unsigned char *name,
size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
if (name[i] == '%' || name[i] == ':' || name[i] == '/' ||
name[i] < 32 || name[i] >= 127) {
seq_printf(s, "%%%02x", name[i]);
} else {
seq_putc(s, name[i]);
}
}
}
static void dump_hoid(struct seq_file *s, const struct ceph_hobject_id *hoid)
{
if (hoid->snapid == 0 && hoid->hash == 0 && !hoid->is_max &&
hoid->pool == S64_MIN) {
seq_puts(s, "MIN");
return;
}
if (hoid->is_max) {
seq_puts(s, "MAX");
return;
}
seq_printf(s, "%lld:%08x:", hoid->pool, hoid->hash_reverse_bits);
dump_name_escaped(s, hoid->nspace, hoid->nspace_len);
seq_putc(s, ':');
dump_name_escaped(s, hoid->key, hoid->key_len);
seq_putc(s, ':');
dump_name_escaped(s, hoid->oid, hoid->oid_len);
seq_putc(s, ':');
dump_snapid(s, hoid->snapid);
}
static void dump_backoffs(struct seq_file *s, struct ceph_osd *osd)
{
struct rb_node *n;
mutex_lock(&osd->lock);
for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) {
struct ceph_osd_backoff *backoff =
rb_entry(n, struct ceph_osd_backoff, id_node);
seq_printf(s, "osd%d\t", osd->o_osd);
dump_spgid(s, &backoff->spgid);
seq_printf(s, "\t%llu\t", backoff->id);
dump_hoid(s, backoff->begin);
seq_putc(s, '\t');
dump_hoid(s, backoff->end);
seq_putc(s, '\n');
}
mutex_unlock(&osd->lock);
}
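/*
* osdc: in-flight requests, linger (watch/notify) requests and backoffs,
* grouped by OSD.
*/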
static int osdc_show(struct seq_file *s, void *pp)
{
struct ceph_client *client = s->private;
struct ceph_osd_client *osdc = &client->osdc;
struct rb_node *n;
down_read(&osdc->lock);
seq_printf(s, "REQUESTS %d homeless %d\n",
atomic_read(&osdc->num_requests),
atomic_read(&osdc->num_homeless));
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
dump_requests(s, osd);
}
dump_requests(s, &osdc->homeless_osd);
seq_puts(s, "LINGER REQUESTS\n");
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
dump_linger_requests(s, osd);
}
dump_linger_requests(s, &osdc->homeless_osd);
seq_puts(s, "BACKOFFS\n");
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
dump_backoffs(s, osd);
}
up_read(&osdc->lock);
return 0;
}
static int client_options_show(struct seq_file *s, void *p)
{
struct ceph_client *client = s->private;
int ret;
ret = ceph_print_client_options(s, client, true);
if (ret)
return ret;
seq_putc(s, '\n');
return 0;
}
DEFINE_SHOW_ATTRIBUTE(monmap);
DEFINE_SHOW_ATTRIBUTE(osdmap);
DEFINE_SHOW_ATTRIBUTE(monc);
DEFINE_SHOW_ATTRIBUTE(osdc);
DEFINE_SHOW_ATTRIBUTE(client_options);
void __init ceph_debugfs_init(void)
{
ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
}
void ceph_debugfs_cleanup(void)
{
debugfs_remove(ceph_debugfs_dir);
}
void ceph_debugfs_client_init(struct ceph_client *client)
{
char name[80];
snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
client->monc.auth->global_id);
dout("ceph_debugfs_client_init %p %s\n", client, name);
client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
client->monc.debugfs_file = debugfs_create_file("monc",
0400,
client->debugfs_dir,
client,
&monc_fops);
client->osdc.debugfs_file = debugfs_create_file("osdc",
0400,
client->debugfs_dir,
client,
&osdc_fops);
client->debugfs_monmap = debugfs_create_file("monmap",
0400,
client->debugfs_dir,
client,
&monmap_fops);
client->debugfs_osdmap = debugfs_create_file("osdmap",
0400,
client->debugfs_dir,
client,
&osdmap_fops);
client->debugfs_options = debugfs_create_file("client_options",
0400,
client->debugfs_dir,
client,
&client_options_fops);
}
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
dout("ceph_debugfs_client_cleanup %p\n", client);
debugfs_remove(client->debugfs_options);
debugfs_remove(client->debugfs_osdmap);
debugfs_remove(client->debugfs_monmap);
debugfs_remove(client->osdc.debugfs_file);
debugfs_remove(client->monc.debugfs_file);
debugfs_remove(client->debugfs_dir);
}
#else /* CONFIG_DEBUG_FS */
void __init ceph_debugfs_init(void)
{
}
void ceph_debugfs_cleanup(void)
{
}
void ceph_debugfs_client_init(struct ceph_client *client)
{
}
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
}
#endif /* CONFIG_DEBUG_FS */
| linux-master | net/ceph/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ceph string constants
*/
#include <linux/module.h>
#include <linux/ceph/types.h>
const char *ceph_entity_type_name(int type)
{
switch (type) {
case CEPH_ENTITY_TYPE_MDS: return "mds";
case CEPH_ENTITY_TYPE_OSD: return "osd";
case CEPH_ENTITY_TYPE_MON: return "mon";
case CEPH_ENTITY_TYPE_CLIENT: return "client";
case CEPH_ENTITY_TYPE_AUTH: return "auth";
default: return "unknown";
}
}
EXPORT_SYMBOL(ceph_entity_type_name);
const char *ceph_auth_proto_name(int proto)
{
switch (proto) {
case CEPH_AUTH_UNKNOWN:
return "unknown";
case CEPH_AUTH_NONE:
return "none";
case CEPH_AUTH_CEPHX:
return "cephx";
default:
return "???";
}
}
const char *ceph_con_mode_name(int mode)
{
switch (mode) {
case CEPH_CON_MODE_UNKNOWN:
return "unknown";
case CEPH_CON_MODE_CRC:
return "crc";
case CEPH_CON_MODE_SECURE:
return "secure";
default:
return "???";
}
}
const char *ceph_osd_op_name(int op)
{
switch (op) {
#define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return (str);
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
default:
return "???";
}
}
const char *ceph_osd_watch_op_name(int o)
{
switch (o) {
case CEPH_OSD_WATCH_OP_UNWATCH:
return "unwatch";
case CEPH_OSD_WATCH_OP_WATCH:
return "watch";
case CEPH_OSD_WATCH_OP_RECONNECT:
return "reconnect";
case CEPH_OSD_WATCH_OP_PING:
return "ping";
default:
return "???";
}
}
const char *ceph_osd_state_name(int s)
{
switch (s) {
case CEPH_OSD_EXISTS:
return "exists";
case CEPH_OSD_UP:
return "up";
case CEPH_OSD_AUTOOUT:
return "autoout";
case CEPH_OSD_NEW:
return "new";
default:
return "???";
}
}
| linux-master | net/ceph/ceph_strings.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
/*
* Interact with the Ceph monitor cluster. Handle requests for new map
* versions, and periodically resend as needed. Also implement
* statfs() and umount().
*
* A small cluster of Ceph "monitors" are responsible for managing critical
* cluster configuration and state information. An odd number (e.g., 3, 5)
* of cmon daemons use a modified version of the Paxos part-time parliament
* algorithm to manage the MDS map (mds cluster membership), OSD map, and
* list of clients who have mounted the file system.
*
* We maintain an open, active session with a monitor at all times in order to
* receive timely MDSMap updates. We periodically send a keepalive byte on the
* TCP socket to ensure we detect a failure. If the connection does break, we
* randomly hunt for a new monitor. Once the connection is reestablished, we
* resend any outstanding requests.
*/
static const struct ceph_connection_operations mon_con_ops;
static int __validate_auth(struct ceph_mon_client *monc);
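/*
* Decode a single mon_info_t entry: skip the mon name and pull out the
* monitor address (legacy or msgr2, as appropriate).
*/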
static int decode_mon_info(void **p, void *end, bool msgr2,
struct ceph_entity_addr *addr)
{
void *mon_info_end;
u32 struct_len;
u8 struct_v;
int ret;
ret = ceph_start_decoding(p, end, 1, "mon_info_t", &struct_v,
&struct_len);
if (ret)
return ret;
mon_info_end = *p + struct_len;
ceph_decode_skip_string(p, end, e_inval); /* skip mon name */
ret = ceph_decode_entity_addrvec(p, end, msgr2, addr);
if (ret)
return ret;
*p = mon_info_end;
return 0;
e_inval:
return -EINVAL;
}
/*
* Decode a monmap blob (e.g., during mount).
*
* Assume MonMap v3 (i.e. encoding with MONNAMES and MONENC).
*/
static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2)
{
struct ceph_monmap *monmap = NULL;
struct ceph_fsid fsid;
u32 struct_len;
int blob_len;
int num_mon;
u8 struct_v;
u32 epoch;
int ret;
int i;
ceph_decode_32_safe(p, end, blob_len, e_inval);
ceph_decode_need(p, end, blob_len, e_inval);
ret = ceph_start_decoding(p, end, 6, "monmap", &struct_v, &struct_len);
if (ret)
goto fail;
dout("%s struct_v %d\n", __func__, struct_v);
ceph_decode_copy_safe(p, end, &fsid, sizeof(fsid), e_inval);
ceph_decode_32_safe(p, end, epoch, e_inval);
if (struct_v >= 6) {
u32 feat_struct_len;
u8 feat_struct_v;
*p += sizeof(struct ceph_timespec); /* skip last_changed */
*p += sizeof(struct ceph_timespec); /* skip created */
ret = ceph_start_decoding(p, end, 1, "mon_feature_t",
&feat_struct_v, &feat_struct_len);
if (ret)
goto fail;
*p += feat_struct_len; /* skip persistent_features */
ret = ceph_start_decoding(p, end, 1, "mon_feature_t",
&feat_struct_v, &feat_struct_len);
if (ret)
goto fail;
*p += feat_struct_len; /* skip optional_features */
}
ceph_decode_32_safe(p, end, num_mon, e_inval);
dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch,
num_mon);
if (num_mon > CEPH_MAX_MON)
goto e_inval;
monmap = kmalloc(struct_size(monmap, mon_inst, num_mon), GFP_NOIO);
if (!monmap) {
ret = -ENOMEM;
goto fail;
}
monmap->fsid = fsid;
monmap->epoch = epoch;
monmap->num_mon = num_mon;
/* legacy_mon_addr map or mon_info map */
for (i = 0; i < num_mon; i++) {
struct ceph_entity_inst *inst = &monmap->mon_inst[i];
ceph_decode_skip_string(p, end, e_inval); /* skip mon name */
inst->name.type = CEPH_ENTITY_TYPE_MON;
inst->name.num = cpu_to_le64(i);
if (struct_v >= 6)
ret = decode_mon_info(p, end, msgr2, &inst->addr);
else
ret = ceph_decode_entity_addr(p, end, &inst->addr);
if (ret)
goto fail;
dout("%s mon%d addr %s\n", __func__, i,
ceph_pr_addr(&inst->addr));
}
return monmap;
e_inval:
ret = -EINVAL;
fail:
kfree(monmap);
return ERR_PTR(ret);
}
/*
* return true if *addr is included in the monmap.
*/
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
int i;
for (i = 0; i < m->num_mon; i++) {
if (ceph_addr_equal_no_type(addr, &m->mon_inst[i].addr))
return 1;
}
return 0;
}
/*
* Send an auth request.
*/
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
monc->pending_auth = 1;
monc->m_auth->front.iov_len = len;
monc->m_auth->hdr.front_len = cpu_to_le32(len);
ceph_msg_revoke(monc->m_auth);
ceph_msg_get(monc->m_auth); /* keep our ref */
ceph_con_send(&monc->con, monc->m_auth);
}
/*
* Close monitor session, if any.
*/
static void __close_session(struct ceph_mon_client *monc)
{
dout("__close_session closing mon%d\n", monc->cur_mon);
ceph_msg_revoke(monc->m_auth);
ceph_msg_revoke_incoming(monc->m_auth_reply);
ceph_msg_revoke(monc->m_subscribe);
ceph_msg_revoke_incoming(monc->m_subscribe_ack);
ceph_con_close(&monc->con);
monc->pending_auth = 0;
ceph_auth_reset(monc->auth);
}
/*
* Pick a new monitor at random and set cur_mon. If we are repicking
* (i.e. cur_mon is already set), be sure to pick a different one.
*/
static void pick_new_mon(struct ceph_mon_client *monc)
{
int old_mon = monc->cur_mon;
BUG_ON(monc->monmap->num_mon < 1);
if (monc->monmap->num_mon == 1) {
monc->cur_mon = 0;
} else {
int max = monc->monmap->num_mon;
int o = -1;
int n;
if (monc->cur_mon >= 0) {
if (monc->cur_mon < monc->monmap->num_mon)
o = monc->cur_mon;
if (o >= 0)
max--;
}
n = get_random_u32_below(max);
if (o >= 0 && n >= o)
n++;
monc->cur_mon = n;
}
dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
monc->cur_mon, monc->monmap->num_mon);
}
/*
* Open a session with a new monitor.
*/
static void __open_session(struct ceph_mon_client *monc)
{
int ret;
pick_new_mon(monc);
monc->hunting = true;
if (monc->had_a_connection) {
monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
}
monc->sub_renew_after = jiffies; /* i.e., expired */
monc->sub_renew_sent = 0;
dout("%s opening mon%d\n", __func__, monc->cur_mon);
ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
&monc->monmap->mon_inst[monc->cur_mon].addr);
/*
* Queue a keepalive to ensure that in case of an early fault
* the messenger doesn't put us into STANDBY state and instead
* retries. This also ensures that our timestamp is valid by
* the time we finish hunting and delayed_work() checks it.
*/
ceph_con_keepalive(&monc->con);
if (ceph_msgr2(monc->client)) {
monc->pending_auth = 1;
return;
}
/* initiate authentication handshake */
ret = ceph_auth_build_hello(monc->auth,
monc->m_auth->front.iov_base,
monc->m_auth->front_alloc_len);
BUG_ON(ret <= 0);
__send_prepared_auth_request(monc, ret);
}
static void reopen_session(struct ceph_mon_client *monc)
{
if (!monc->hunting)
pr_info("mon%d %s session lost, hunting for new mon\n",
monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr));
__close_session(monc);
__open_session(monc);
}
void ceph_monc_reopen_session(struct ceph_mon_client *monc)
{
mutex_lock(&monc->mutex);
reopen_session(monc);
mutex_unlock(&monc->mutex);
}
static void un_backoff(struct ceph_mon_client *monc)
{
monc->hunt_mult /= 2; /* reduce by 50% */
if (monc->hunt_mult < 1)
monc->hunt_mult = 1;
dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
}
/*
* Reschedule delayed work timer.
*/
static void __schedule_delayed(struct ceph_mon_client *monc)
{
unsigned long delay;
if (monc->hunting)
delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
else
delay = CEPH_MONC_PING_INTERVAL;
dout("__schedule_delayed after %lu\n", delay);
mod_delayed_work(system_wq, &monc->delayed_work,
round_jiffies_relative(delay));
}
const char *ceph_sub_str[] = {
[CEPH_SUB_MONMAP] = "monmap",
[CEPH_SUB_OSDMAP] = "osdmap",
[CEPH_SUB_FSMAP] = "fsmap.user",
[CEPH_SUB_MDSMAP] = "mdsmap",
};
/*
* Send subscribe request for one or more maps, according to
* monc->subs.
*/
static void __send_subscribe(struct ceph_mon_client *monc)
{
struct ceph_msg *msg = monc->m_subscribe;
void *p = msg->front.iov_base;
void *const end = p + msg->front_alloc_len;
int num = 0;
int i;
dout("%s sent %lu\n", __func__, monc->sub_renew_sent);
BUG_ON(monc->cur_mon < 0);
if (!monc->sub_renew_sent)
monc->sub_renew_sent = jiffies | 1; /* never 0 */
msg->hdr.version = cpu_to_le16(2);
for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
if (monc->subs[i].want)
num++;
}
BUG_ON(num < 1); /* monmap sub is always there */
ceph_encode_32(&p, num);
for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
char buf[32];
int len;
if (!monc->subs[i].want)
continue;
len = sprintf(buf, "%s", ceph_sub_str[i]);
if (i == CEPH_SUB_MDSMAP &&
monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
len += sprintf(buf + len, ".%d", monc->fs_cluster_id);
dout("%s %s start %llu flags 0x%x\n", __func__, buf,
le64_to_cpu(monc->subs[i].item.start),
monc->subs[i].item.flags);
ceph_encode_string(&p, end, buf, len);
memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
p += sizeof(monc->subs[i].item);
}
BUG_ON(p > end);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
ceph_msg_revoke(msg);
ceph_con_send(&monc->con, ceph_msg_get(msg));
}
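/*
* Handle a subscribe ack. For legacy monitors, record when the current
* subscription should be renewed (see delayed_work()).
*/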
static void handle_subscribe_ack(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
unsigned int seconds;
struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
if (msg->front.iov_len < sizeof(*h))
goto bad;
seconds = le32_to_cpu(h->duration);
mutex_lock(&monc->mutex);
if (monc->sub_renew_sent) {
/*
* This is only needed for legacy (infernalis or older)
* MONs -- see delayed_work().
*/
monc->sub_renew_after = monc->sub_renew_sent +
(seconds >> 1) * HZ - 1;
dout("%s sent %lu duration %d renew after %lu\n", __func__,
monc->sub_renew_sent, seconds, monc->sub_renew_after);
monc->sub_renew_sent = 0;
} else {
dout("%s sent %lu renew after %lu, ignoring\n", __func__,
monc->sub_renew_sent, monc->sub_renew_after);
}
mutex_unlock(&monc->mutex);
return;
bad:
pr_err("got corrupt subscribe-ack msg\n");
ceph_msg_dump(msg);
}
/*
* Register interest in a map
*
* @sub: one of CEPH_SUB_*
* @epoch: X for "every map since X", or 0 for "just the latest"
*/
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
u32 epoch, bool continuous)
{
__le64 start = cpu_to_le64(epoch);
u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;
dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
epoch, continuous);
if (monc->subs[sub].want &&
monc->subs[sub].item.start == start &&
monc->subs[sub].item.flags == flags)
return false;
monc->subs[sub].item.start = start;
monc->subs[sub].item.flags = flags;
monc->subs[sub].want = true;
return true;
}
bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
bool continuous)
{
bool need_request;
mutex_lock(&monc->mutex);
need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
mutex_unlock(&monc->mutex);
return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);
/*
* Keep track of which maps we have
*
* @sub: one of CEPH_SUB_*
*/
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
u32 epoch)
{
dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);
if (monc->subs[sub].want) {
if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
monc->subs[sub].want = false;
else
monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
}
monc->subs[sub].have = epoch;
}
void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
mutex_lock(&monc->mutex);
__ceph_monc_got_map(monc, sub, epoch);
mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);
void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
mutex_lock(&monc->mutex);
__send_subscribe(monc);
mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_renew_subs);
/*
* Wait for an osdmap with a given epoch.
*
* @epoch: epoch to wait for
* @timeout: in jiffies, 0 means "wait forever"
*/
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout)
{
unsigned long started = jiffies;
long ret;
mutex_lock(&monc->mutex);
while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
mutex_unlock(&monc->mutex);
if (timeout && time_after_eq(jiffies, started + timeout))
return -ETIMEDOUT;
ret = wait_event_interruptible_timeout(monc->client->auth_wq,
monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
ceph_timeout_jiffies(timeout));
if (ret < 0)
return ret;
mutex_lock(&monc->mutex);
}
mutex_unlock(&monc->mutex);
return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);
/*
* Open a session with a random monitor. Request monmap and osdmap,
* which are waited upon in __ceph_open_session().
*/
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
mutex_lock(&monc->mutex);
__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
__open_session(monc);
__schedule_delayed(monc);
mutex_unlock(&monc->mutex);
return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_client *client = monc->client;
struct ceph_monmap *monmap;
void *p, *end;
mutex_lock(&monc->mutex);
dout("handle_monmap\n");
p = msg->front.iov_base;
end = p + msg->front.iov_len;
monmap = ceph_monmap_decode(&p, end, ceph_msgr2(client));
if (IS_ERR(monmap)) {
pr_err("problem decoding monmap, %d\n",
(int)PTR_ERR(monmap));
ceph_msg_dump(msg);
goto out;
}
if (ceph_check_fsid(client, &monmap->fsid) < 0) {
kfree(monmap);
goto out;
}
kfree(monc->monmap);
monc->monmap = monmap;
__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
client->have_fsid = true;
out:
mutex_unlock(&monc->mutex);
wake_up_all(&client->auth_wq);
}
/*
* generic requests (currently statfs, mon_get_version)
*/
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
static void release_generic_request(struct kref *kref)
{
struct ceph_mon_generic_request *req =
container_of(kref, struct ceph_mon_generic_request, kref);
dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
req->reply);
WARN_ON(!RB_EMPTY_NODE(&req->node));
if (req->reply)
ceph_msg_put(req->reply);
if (req->request)
ceph_msg_put(req->request);
kfree(req);
}
static void put_generic_request(struct ceph_mon_generic_request *req)
{
if (req)
kref_put(&req->kref, release_generic_request);
}
static void get_generic_request(struct ceph_mon_generic_request *req)
{
kref_get(&req->kref);
}
static struct ceph_mon_generic_request *
alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
{
struct ceph_mon_generic_request *req;
req = kzalloc(sizeof(*req), gfp);
if (!req)
return NULL;
req->monc = monc;
kref_init(&req->kref);
RB_CLEAR_NODE(&req->node);
init_completion(&req->completion);
dout("%s greq %p\n", __func__, req);
return req;
}
static void register_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
WARN_ON(req->tid);
get_generic_request(req);
req->tid = ++monc->last_tid;
insert_generic_request(&monc->generic_request_tree, req);
}
static void send_generic_request(struct ceph_mon_client *monc,
struct ceph_mon_generic_request *req)
{
WARN_ON(!req->tid);
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req->request->hdr.tid = cpu_to_le64(req->tid);
ceph_con_send(&monc->con, ceph_msg_get(req->request));
}
static void __finish_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
erase_generic_request(&monc->generic_request_tree, req);
ceph_msg_revoke(req->request);
ceph_msg_revoke_incoming(req->reply);
}
static void finish_generic_request(struct ceph_mon_generic_request *req)
{
__finish_generic_request(req);
put_generic_request(req);
}
static void complete_generic_request(struct ceph_mon_generic_request *req)
{
if (req->complete_cb)
req->complete_cb(req);
else
complete_all(&req->completion);
put_generic_request(req);
}
static void cancel_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
struct ceph_mon_generic_request *lookup_req;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
mutex_lock(&monc->mutex);
lookup_req = lookup_generic_request(&monc->generic_request_tree,
req->tid);
if (lookup_req) {
WARN_ON(lookup_req != req);
finish_generic_request(req);
}
mutex_unlock(&monc->mutex);
}
static int wait_generic_request(struct ceph_mon_generic_request *req)
{
int ret;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
ret = wait_for_completion_interruptible(&req->completion);
if (ret)
cancel_generic_request(req);
else
ret = req->result; /* completed */
return ret;
}
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_mon_client *monc = con->private;
struct ceph_mon_generic_request *req;
u64 tid = le64_to_cpu(hdr->tid);
struct ceph_msg *m;
mutex_lock(&monc->mutex);
req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
dout("get_generic_reply %lld dne\n", tid);
*skip = 1;
m = NULL;
} else {
dout("get_generic_reply %lld got %p\n", tid, req->reply);
*skip = 0;
m = ceph_msg_get(req->reply);
/*
* we don't need to track the connection reading into
* this reply because we only have one open connection
* at a time, ever.
*/
}
mutex_unlock(&monc->mutex);
return m;
}
/*
* statfs
*/
static void handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_mon_generic_request *req;
struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
u64 tid = le64_to_cpu(msg->hdr.tid);
dout("%s msg %p tid %llu\n", __func__, msg, tid);
if (msg->front.iov_len != sizeof(*reply))
goto bad;
mutex_lock(&monc->mutex);
req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
mutex_unlock(&monc->mutex);
return;
}
req->result = 0;
*req->u.st = reply->st; /* struct */
__finish_generic_request(req);
mutex_unlock(&monc->mutex);
complete_generic_request(req);
return;
bad:
pr_err("corrupt statfs reply, tid %llu\n", tid);
ceph_msg_dump(msg);
}
/*
* Do a synchronous statfs().
*/
int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
struct ceph_statfs *buf)
{
struct ceph_mon_generic_request *req;
struct ceph_mon_statfs *h;
int ret = -ENOMEM;
req = alloc_generic_request(monc, GFP_NOFS);
if (!req)
goto out;
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
true);
if (!req->request)
goto out;
req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
if (!req->reply)
goto out;
req->u.st = buf;
req->request->hdr.version = cpu_to_le16(2);
mutex_lock(&monc->mutex);
register_generic_request(req);
/* fill out request */
h = req->request->front.iov_base;
h->monhdr.have_version = 0;
h->monhdr.session_mon = cpu_to_le16(-1);
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
h->contains_data_pool = (data_pool != CEPH_NOPOOL);
h->data_pool = cpu_to_le64(data_pool);
send_generic_request(monc, req);
mutex_unlock(&monc->mutex);
ret = wait_generic_request(req);
out:
put_generic_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
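/*
* mon_get_version: the reply carries the request handle (tid) followed by
* the newest version of the requested map.
*/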
static void handle_get_version_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_mon_generic_request *req;
u64 tid = le64_to_cpu(msg->hdr.tid);
void *p = msg->front.iov_base;
void *end = p + msg->front_alloc_len;
u64 handle;
dout("%s msg %p tid %llu\n", __func__, msg, tid);
ceph_decode_need(&p, end, 2*sizeof(u64), bad);
handle = ceph_decode_64(&p);
if (tid != 0 && tid != handle)
goto bad;
mutex_lock(&monc->mutex);
req = lookup_generic_request(&monc->generic_request_tree, handle);
if (!req) {
mutex_unlock(&monc->mutex);
return;
}
req->result = 0;
req->u.newest = ceph_decode_64(&p);
__finish_generic_request(req);
mutex_unlock(&monc->mutex);
complete_generic_request(req);
return;
bad:
pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
ceph_msg_dump(msg);
}
static struct ceph_mon_generic_request *
__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data)
{
struct ceph_mon_generic_request *req;
req = alloc_generic_request(monc, GFP_NOIO);
if (!req)
goto err_put_req;
req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
sizeof(u64) + sizeof(u32) + strlen(what),
GFP_NOIO, true);
if (!req->request)
goto err_put_req;
req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
true);
if (!req->reply)
goto err_put_req;
req->complete_cb = cb;
req->private_data = private_data;
mutex_lock(&monc->mutex);
register_generic_request(req);
{
void *p = req->request->front.iov_base;
void *const end = p + req->request->front_alloc_len;
ceph_encode_64(&p, req->tid); /* handle */
ceph_encode_string(&p, end, what, strlen(what));
WARN_ON(p != end);
}
send_generic_request(monc, req);
mutex_unlock(&monc->mutex);
return req;
err_put_req:
put_generic_request(req);
return ERR_PTR(-ENOMEM);
}
/*
* Send MMonGetVersion and wait for the reply.
*
* @what: one of "mdsmap", "osdmap" or "monmap"
*/
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest)
{
struct ceph_mon_generic_request *req;
int ret;
req = __ceph_monc_get_version(monc, what, NULL, 0);
if (IS_ERR(req))
return PTR_ERR(req);
ret = wait_generic_request(req);
if (!ret)
*newest = req->u.newest;
put_generic_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_monc_get_version);
/*
* Send MMonGetVersion without waiting for the reply; the callback is
* invoked when the reply arrives.
*
* @what: one of "mdsmap", "osdmap" or "monmap"
*/
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data)
{
struct ceph_mon_generic_request *req;
req = __ceph_monc_get_version(monc, what, cb, private_data);
if (IS_ERR(req))
return PTR_ERR(req);
put_generic_request(req);
return 0;
}
EXPORT_SYMBOL(ceph_monc_get_version_async);
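/*
* Handle an ack for a generic mon command: the result code follows the
* mon request header in the front payload.
*/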
static void handle_command_ack(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_mon_generic_request *req;
void *p = msg->front.iov_base;
void *const end = p + msg->front_alloc_len;
u64 tid = le64_to_cpu(msg->hdr.tid);
dout("%s msg %p tid %llu\n", __func__, msg, tid);
ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) +
sizeof(u32), bad);
p += sizeof(struct ceph_mon_request_header);
mutex_lock(&monc->mutex);
req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
mutex_unlock(&monc->mutex);
return;
}
req->result = ceph_decode_32(&p);
__finish_generic_request(req);
mutex_unlock(&monc->mutex);
complete_generic_request(req);
return;
bad:
pr_err("corrupt mon_command ack, tid %llu\n", tid);
ceph_msg_dump(msg);
}
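/*
* Issue a single-string JSON mon command and wait for the ack.
*/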
static __printf(2, 0)
int do_mon_command_vargs(struct ceph_mon_client *monc, const char *fmt,
va_list ap)
{
struct ceph_mon_generic_request *req;
struct ceph_mon_command *h;
int ret = -ENOMEM;
int len;
req = alloc_generic_request(monc, GFP_NOIO);
if (!req)
goto out;
req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true);
if (!req->request)
goto out;
req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO,
true);
if (!req->reply)
goto out;
mutex_lock(&monc->mutex);
register_generic_request(req);
h = req->request->front.iov_base;
h->monhdr.have_version = 0;
h->monhdr.session_mon = cpu_to_le16(-1);
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
h->num_strs = cpu_to_le32(1);
len = vsprintf(h->str, fmt, ap);
h->str_len = cpu_to_le32(len);
send_generic_request(monc, req);
mutex_unlock(&monc->mutex);
ret = wait_generic_request(req);
out:
put_generic_request(req);
return ret;
}
static __printf(2, 3)
int do_mon_command(struct ceph_mon_client *monc, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = do_mon_command_vargs(monc, fmt, ap);
va_end(ap);
return ret;
}
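/*
* Ask the monitors to blocklist @client_addr, falling back to the legacy
* "osd blacklist" command for older clusters, and then wait for an osdmap
* that includes the new entry.
*/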
int ceph_monc_blocklist_add(struct ceph_mon_client *monc,
struct ceph_entity_addr *client_addr)
{
int ret;
ret = do_mon_command(monc,
"{ \"prefix\": \"osd blocklist\", \
\"blocklistop\": \"add\", \
\"addr\": \"%pISpc/%u\" }",
&client_addr->in_addr,
le32_to_cpu(client_addr->nonce));
if (ret == -EINVAL) {
/*
* The monitor returns EINVAL on an unrecognized command.
* Try the legacy command -- it is exactly the same except
* for the name.
*/
ret = do_mon_command(monc,
"{ \"prefix\": \"osd blacklist\", \
\"blacklistop\": \"add\", \
\"addr\": \"%pISpc/%u\" }",
&client_addr->in_addr,
le32_to_cpu(client_addr->nonce));
}
if (ret)
return ret;
/*
* Make sure we have the osdmap that includes the blocklist
* entry. This is needed to ensure that the OSDs pick up the
* new blocklist before processing any future requests from
* this client.
*/
return ceph_wait_for_latest_osdmap(monc->client, 0);
}
EXPORT_SYMBOL(ceph_monc_blocklist_add);
/*
* Resend pending generic requests.
*/
static void __resend_generic_request(struct ceph_mon_client *monc)
{
struct ceph_mon_generic_request *req;
struct rb_node *p;
for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_mon_generic_request, node);
ceph_msg_revoke(req->request);
ceph_msg_revoke_incoming(req->reply);
ceph_con_send(&monc->con, ceph_msg_get(req->request));
}
}
/*
* Delayed work. If we haven't mounted yet, retry. Otherwise,
* renew/retry subscription as needed (in case it is timing out, or we
* got an ENOMEM). And keep the monitor connection alive.
*/
static void delayed_work(struct work_struct *work)
{
struct ceph_mon_client *monc =
container_of(work, struct ceph_mon_client, delayed_work.work);
dout("monc delayed_work\n");
mutex_lock(&monc->mutex);
if (monc->hunting) {
dout("%s continuing hunt\n", __func__);
reopen_session(monc);
} else {
int is_auth = ceph_auth_is_authenticated(monc->auth);
if (ceph_con_keepalive_expired(&monc->con,
CEPH_MONC_PING_TIMEOUT)) {
dout("monc keepalive timeout\n");
is_auth = 0;
reopen_session(monc);
}
if (!monc->hunting) {
ceph_con_keepalive(&monc->con);
__validate_auth(monc);
un_backoff(monc);
}
if (is_auth &&
!(monc->con.peer_features & CEPH_FEATURE_MON_STATEFUL_SUB)) {
unsigned long now = jiffies;
dout("%s renew subs? now %lu renew after %lu\n",
__func__, now, monc->sub_renew_after);
if (time_after_eq(now, monc->sub_renew_after))
__send_subscribe(monc);
}
}
__schedule_delayed(monc);
mutex_unlock(&monc->mutex);
}
/*
* On startup, we build a temporary monmap populated with the IPs
* provided by mount(2).
*/
static int build_initial_monmap(struct ceph_mon_client *monc)
{
__le32 my_type = ceph_msgr2(monc->client) ?
CEPH_ENTITY_ADDR_TYPE_MSGR2 : CEPH_ENTITY_ADDR_TYPE_LEGACY;
struct ceph_options *opt = monc->client->options;
int num_mon = opt->num_mon;
int i;
/* build initial monmap */
monc->monmap = kzalloc(struct_size(monc->monmap, mon_inst, num_mon),
GFP_KERNEL);
if (!monc->monmap)
return -ENOMEM;
for (i = 0; i < num_mon; i++) {
struct ceph_entity_inst *inst = &monc->monmap->mon_inst[i];
memcpy(&inst->addr.in_addr, &opt->mon_addr[i].in_addr,
sizeof(inst->addr.in_addr));
inst->addr.type = my_type;
inst->addr.nonce = 0;
inst->name.type = CEPH_ENTITY_TYPE_MON;
inst->name.num = cpu_to_le64(i);
}
monc->monmap->num_mon = num_mon;
return 0;
}
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
int err;
dout("init\n");
memset(monc, 0, sizeof(*monc));
monc->client = cl;
mutex_init(&monc->mutex);
err = build_initial_monmap(monc);
if (err)
goto out;
/* connection */
/* authentication */
monc->auth = ceph_auth_init(cl->options->name, cl->options->key,
cl->options->con_modes);
if (IS_ERR(monc->auth)) {
err = PTR_ERR(monc->auth);
goto out_monmap;
}
monc->auth->want_keys =
CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
/* msgs */
err = -ENOMEM;
monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
sizeof(struct ceph_mon_subscribe_ack),
GFP_KERNEL, true);
if (!monc->m_subscribe_ack)
goto out_auth;
monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128,
GFP_KERNEL, true);
if (!monc->m_subscribe)
goto out_subscribe_ack;
monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096,
GFP_KERNEL, true);
if (!monc->m_auth_reply)
goto out_subscribe;
monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_KERNEL, true);
monc->pending_auth = 0;
if (!monc->m_auth)
goto out_auth_reply;
ceph_con_init(&monc->con, monc, &mon_con_ops,
&monc->client->msgr);
monc->cur_mon = -1;
monc->had_a_connection = false;
monc->hunt_mult = 1;
INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
monc->generic_request_tree = RB_ROOT;
monc->last_tid = 0;
monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;
return 0;
out_auth_reply:
ceph_msg_put(monc->m_auth_reply);
out_subscribe:
ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
ceph_msg_put(monc->m_subscribe_ack);
out_auth:
ceph_auth_destroy(monc->auth);
out_monmap:
kfree(monc->monmap);
out:
return err;
}
EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
{
dout("stop\n");
cancel_delayed_work_sync(&monc->delayed_work);
mutex_lock(&monc->mutex);
__close_session(monc);
monc->cur_mon = -1;
mutex_unlock(&monc->mutex);
/*
* flush msgr queue before we destroy ourselves to ensure that:
* - any work that references our embedded con is finished.
* - any osd_client or other work that may reference an authorizer
* finishes before we shut down the auth subsystem.
*/
ceph_msgr_flush();
ceph_auth_destroy(monc->auth);
WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));
ceph_msg_put(monc->m_auth);
ceph_msg_put(monc->m_auth_reply);
ceph_msg_put(monc->m_subscribe);
ceph_msg_put(monc->m_subscribe_ack);
kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);
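/*
* A session with the chosen monitor has been established -- stop hunting
* and start unwinding the hunt backoff.
*/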
static void finish_hunting(struct ceph_mon_client *monc)
{
if (monc->hunting) {
dout("%s found mon%d\n", __func__, monc->cur_mon);
monc->hunting = false;
monc->had_a_connection = true;
un_backoff(monc);
__schedule_delayed(monc);
}
}
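/*
* Record the outcome of the authentication exchange. On the transition
* to the authenticated state, adopt the assigned global_id, (re)send
* subscriptions and resend any outstanding generic requests.
*/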
static void finish_auth(struct ceph_mon_client *monc, int auth_err,
bool was_authed)
{
dout("%s auth_err %d was_authed %d\n", __func__, auth_err, was_authed);
WARN_ON(auth_err > 0);
monc->pending_auth = 0;
if (auth_err) {
monc->client->auth_err = auth_err;
wake_up_all(&monc->client->auth_wq);
return;
}
if (!was_authed && ceph_auth_is_authenticated(monc->auth)) {
dout("%s authenticated, starting session global_id %llu\n",
__func__, monc->auth->global_id);
monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
monc->client->msgr.inst.name.num =
cpu_to_le64(monc->auth->global_id);
__send_subscribe(monc);
__resend_generic_request(monc);
pr_info("mon%d %s session established\n", monc->cur_mon,
ceph_pr_addr(&monc->con.peer_addr));
}
}
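/*
* Handle a msgr1 auth reply: either send the next prepared auth request
* or finish authentication (and hunting).
*/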
static void handle_auth_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
bool was_authed;
int ret;
mutex_lock(&monc->mutex);
was_authed = ceph_auth_is_authenticated(monc->auth);
ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
msg->front.iov_len,
monc->m_auth->front.iov_base,
monc->m_auth->front_alloc_len);
if (ret > 0) {
__send_prepared_auth_request(monc, ret);
} else {
finish_auth(monc, ret, was_authed);
finish_hunting(monc);
}
mutex_unlock(&monc->mutex);
}
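/*
* If the auth layer reports that (re)authentication is needed, build and
* send a fresh auth request, unless one is already pending.
*/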
static int __validate_auth(struct ceph_mon_client *monc)
{
int ret;
if (monc->pending_auth)
return 0;
ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
monc->m_auth->front_alloc_len);
if (ret <= 0)
return ret; /* either an error, or no need to authenticate */
__send_prepared_auth_request(monc, ret);
return 0;
}
int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
int ret;
mutex_lock(&monc->mutex);
ret = __validate_auth(monc);
mutex_unlock(&monc->mutex);
return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);
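/*
* msgr2 authentication hooks, invoked by the messenger on the monitor
* connection.
*/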
static int mon_get_auth_request(struct ceph_connection *con,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_mon_client *monc = con->private;
int ret;
mutex_lock(&monc->mutex);
ret = ceph_auth_get_request(monc->auth, buf, *buf_len);
mutex_unlock(&monc->mutex);
if (ret < 0)
return ret;
*buf_len = ret;
*authorizer = NULL;
*authorizer_len = 0;
return 0;
}
static int mon_handle_auth_reply_more(struct ceph_connection *con,
void *reply, int reply_len,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_mon_client *monc = con->private;
int ret;
mutex_lock(&monc->mutex);
ret = ceph_auth_handle_reply_more(monc->auth, reply, reply_len,
buf, *buf_len);
mutex_unlock(&monc->mutex);
if (ret < 0)
return ret;
*buf_len = ret;
*authorizer = NULL;
*authorizer_len = 0;
return 0;
}
static int mon_handle_auth_done(struct ceph_connection *con,
u64 global_id, void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_mon_client *monc = con->private;
bool was_authed;
int ret;
mutex_lock(&monc->mutex);
WARN_ON(!monc->hunting);
was_authed = ceph_auth_is_authenticated(monc->auth);
ret = ceph_auth_handle_reply_done(monc->auth, global_id,
reply, reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
finish_auth(monc, ret, was_authed);
if (!ret)
finish_hunting(monc);
mutex_unlock(&monc->mutex);
return 0;
}
static int mon_handle_auth_bad_method(struct ceph_connection *con,
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt)
{
struct ceph_mon_client *monc = con->private;
bool was_authed;
mutex_lock(&monc->mutex);
WARN_ON(!monc->hunting);
was_authed = ceph_auth_is_authenticated(monc->auth);
ceph_auth_handle_bad_method(monc->auth, used_proto, result,
allowed_protos, proto_cnt,
allowed_modes, mode_cnt);
finish_auth(monc, -EACCES, was_authed);
mutex_unlock(&monc->mutex);
return 0;
}
/*
* handle incoming message
*/
static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mon_client *monc = con->private;
int type = le16_to_cpu(msg->hdr.type);
switch (type) {
case CEPH_MSG_AUTH_REPLY:
handle_auth_reply(monc, msg);
break;
case CEPH_MSG_MON_SUBSCRIBE_ACK:
handle_subscribe_ack(monc, msg);
break;
case CEPH_MSG_STATFS_REPLY:
handle_statfs_reply(monc, msg);
break;
case CEPH_MSG_MON_GET_VERSION_REPLY:
handle_get_version_reply(monc, msg);
break;
case CEPH_MSG_MON_COMMAND_ACK:
handle_command_ack(monc, msg);
break;
case CEPH_MSG_MON_MAP:
ceph_monc_handle_map(monc, msg);
break;
case CEPH_MSG_OSD_MAP:
ceph_osdc_handle_map(&monc->client->osdc, msg);
break;
default:
/* can the chained handler handle it? */
if (monc->client->extra_mon_dispatch &&
monc->client->extra_mon_dispatch(monc->client, msg) == 0)
break;
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
ceph_msg_put(msg);
}
/*
* Allocate memory for incoming message
*/
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_mon_client *monc = con->private;
int type = le16_to_cpu(hdr->type);
int front_len = le32_to_cpu(hdr->front_len);
struct ceph_msg *m = NULL;
*skip = 0;
switch (type) {
case CEPH_MSG_MON_SUBSCRIBE_ACK:
m = ceph_msg_get(monc->m_subscribe_ack);
break;
case CEPH_MSG_STATFS_REPLY:
case CEPH_MSG_MON_COMMAND_ACK:
return get_generic_reply(con, hdr, skip);
case CEPH_MSG_AUTH_REPLY:
m = ceph_msg_get(monc->m_auth_reply);
break;
case CEPH_MSG_MON_GET_VERSION_REPLY:
if (le64_to_cpu(hdr->tid) != 0)
return get_generic_reply(con, hdr, skip);
/*
* Older OSDs don't set reply tid even if the original
* request had a non-zero tid. Work around this weirdness
* by allocating a new message.
*/
fallthrough;
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_FS_MAP_USER:
m = ceph_msg_new(type, front_len, GFP_NOFS, false);
if (!m)
return NULL; /* ENOMEM--return skip == 0 */
break;
}
if (!m) {
pr_info("alloc_msg unknown type %d\n", type);
*skip = 1;
} else if (front_len > m->front_alloc_len) {
pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
front_len, m->front_alloc_len,
(unsigned int)con->peer_name.type,
le64_to_cpu(con->peer_name.num));
ceph_msg_put(m);
m = ceph_msg_new(type, front_len, GFP_NOFS, false);
}
return m;
}
/*
* If the monitor connection resets, pick a new monitor and resubmit
* any pending requests.
*/
static void mon_fault(struct ceph_connection *con)
{
struct ceph_mon_client *monc = con->private;
mutex_lock(&monc->mutex);
dout("%s mon%d\n", __func__, monc->cur_mon);
if (monc->cur_mon >= 0) {
if (!monc->hunting) {
dout("%s hunting for new mon\n", __func__);
reopen_session(monc);
__schedule_delayed(monc);
} else {
dout("%s already hunting\n", __func__);
}
}
mutex_unlock(&monc->mutex);
}
/*
* We can ignore refcounting on the connection struct, as all references
* will come from the messenger workqueue, which is drained prior to
* mon_client destruction.
*/
static struct ceph_connection *mon_get_con(struct ceph_connection *con)
{
return con;
}
static void mon_put_con(struct ceph_connection *con)
{
}
static const struct ceph_connection_operations mon_con_ops = {
.get = mon_get_con,
.put = mon_put_con,
.alloc_msg = mon_alloc_msg,
.dispatch = mon_dispatch,
.fault = mon_fault,
.get_auth_request = mon_get_auth_request,
.handle_auth_reply_more = mon_handle_auth_reply_more,
.handle_auth_done = mon_handle_auth_done,
.handle_auth_bad_method = mon_handle_auth_bad_method,
};
| linux-master | net/ceph/mon_client.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ceph/ceph_debug.h>
#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/key.h>
#include <keys/ceph-type.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include "crypto.h"
/*
* Module compatibility interface. For now it doesn't do anything,
* but its existence signals a certain level of functionality.
*
* The data buffer is used to pass information both to and from
* libceph. The return value indicates whether libceph determines
* it is compatible with the caller (from another kernel module),
* given the provided data.
*
* The data pointer can be null.
*/
bool libceph_compatible(void *data)
{
return true;
}
EXPORT_SYMBOL(libceph_compatible);
static int param_get_supported_features(char *buffer,
const struct kernel_param *kp)
{
return sprintf(buffer, "0x%llx", CEPH_FEATURES_SUPPORTED_DEFAULT);
}
static const struct kernel_param_ops param_ops_supported_features = {
.get = param_get_supported_features,
};
module_param_cb(supported_features, &param_ops_supported_features, NULL,
0444);
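/*
* With the 0444 permission above, the supported feature mask should be
* readable at runtime, typically via
* /sys/module/libceph/parameters/supported_features.
*/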
const char *ceph_msg_type_name(int type)
{
switch (type) {
case CEPH_MSG_SHUTDOWN: return "shutdown";
case CEPH_MSG_PING: return "ping";
case CEPH_MSG_AUTH: return "auth";
case CEPH_MSG_AUTH_REPLY: return "auth_reply";
case CEPH_MSG_MON_MAP: return "mon_map";
case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
case CEPH_MSG_STATFS: return "statfs";
case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
case CEPH_MSG_MON_GET_VERSION: return "mon_get_version";
case CEPH_MSG_MON_GET_VERSION_REPLY: return "mon_get_version_reply";
case CEPH_MSG_MDS_MAP: return "mds_map";
case CEPH_MSG_FS_MAP_USER: return "fs_map_user";
case CEPH_MSG_CLIENT_SESSION: return "client_session";
case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
case CEPH_MSG_CLIENT_REQUEST: return "client_request";
case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
case CEPH_MSG_CLIENT_REPLY: return "client_reply";
case CEPH_MSG_CLIENT_CAPS: return "client_caps";
case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
case CEPH_MSG_CLIENT_QUOTA: return "client_quota";
case CEPH_MSG_CLIENT_SNAP: return "client_snap";
case CEPH_MSG_CLIENT_LEASE: return "client_lease";
case CEPH_MSG_POOLOP_REPLY: return "poolop_reply";
case CEPH_MSG_POOLOP: return "poolop";
case CEPH_MSG_MON_COMMAND: return "mon_command";
case CEPH_MSG_MON_COMMAND_ACK: return "mon_command_ack";
case CEPH_MSG_OSD_MAP: return "osd_map";
case CEPH_MSG_OSD_OP: return "osd_op";
case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
case CEPH_MSG_WATCH_NOTIFY: return "watch_notify";
case CEPH_MSG_OSD_BACKOFF: return "osd_backoff";
default: return "unknown";
}
}
EXPORT_SYMBOL(ceph_msg_type_name);
/*
* Initially learn our fsid, or verify an fsid matches.
*/
int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
{
if (client->have_fsid) {
if (ceph_fsid_compare(&client->fsid, fsid)) {
pr_err("bad fsid, had %pU got %pU",
&client->fsid, fsid);
return -1;
}
} else {
memcpy(&client->fsid, fsid, sizeof(*fsid));
}
return 0;
}
EXPORT_SYMBOL(ceph_check_fsid);
static int strcmp_null(const char *s1, const char *s2)
{
if (!s1 && !s2)
return 0;
if (s1 && !s2)
return -1;
if (!s1 && s2)
return 1;
return strcmp(s1, s2);
}
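/*
* Compare freshly parsed options against those of an existing client, as
* done below: the fixed-layout head of struct ceph_options (everything
* before mon_addr) is memcmp'd, then name, key and CRUSH location are
* compared individually, and finally any monitor address already present
* in the existing monmap is treated as a match.
*/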
int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client)
{
struct ceph_options *opt1 = new_opt;
struct ceph_options *opt2 = client->options;
int ofs = offsetof(struct ceph_options, mon_addr);
int i;
int ret;
/*
* Don't bother comparing options if network namespaces don't
* match.
*/
if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
return -1;
ret = memcmp(opt1, opt2, ofs);
if (ret)
return ret;
ret = strcmp_null(opt1->name, opt2->name);
if (ret)
return ret;
if (opt1->key && !opt2->key)
return -1;
if (!opt1->key && opt2->key)
return 1;
if (opt1->key && opt2->key) {
if (opt1->key->type != opt2->key->type)
return -1;
if (opt1->key->created.tv_sec != opt2->key->created.tv_sec)
return -1;
if (opt1->key->created.tv_nsec != opt2->key->created.tv_nsec)
return -1;
if (opt1->key->len != opt2->key->len)
return -1;
if (opt1->key->key && !opt2->key->key)
return -1;
if (!opt1->key->key && opt2->key->key)
return 1;
if (opt1->key->key && opt2->key->key) {
ret = memcmp(opt1->key->key, opt2->key->key, opt1->key->len);
if (ret)
return ret;
}
}
ret = ceph_compare_crush_locs(&opt1->crush_locs, &opt2->crush_locs);
if (ret)
return ret;
/* any matching mon ip implies a match */
for (i = 0; i < opt1->num_mon; i++) {
if (ceph_monmap_contains(client->monc.monmap,
&opt1->mon_addr[i]))
return 0;
}
return -1;
}
EXPORT_SYMBOL(ceph_compare_options);
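/*
* Parse a textual fsid into its 16-byte binary form. Punctuation is
* skipped, so both the canonical UUID form and a bare hex string are
* accepted, e.g. (illustrative values only):
*
* 01234567-89ab-cdef-0123-456789abcdef
* 0123456789abcdef0123456789abcdef
*/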
int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid)
{
int i = 0;
char tmp[3];
int err = -EINVAL;
int d;
dout("%s '%s'\n", __func__, str);
tmp[2] = 0;
while (*str && i < 16) {
if (ispunct(*str)) {
str++;
continue;
}
if (!isxdigit(str[0]) || !isxdigit(str[1]))
break;
tmp[0] = str[0];
tmp[1] = str[1];
if (sscanf(tmp, "%x", &d) < 1)
break;
fsid->fsid[i] = d & 0xff;
i++;
str += 2;
}
if (i == 16)
err = 0;
dout("%s ret %d got fsid %pU\n", __func__, err, fsid);
return err;
}
EXPORT_SYMBOL(ceph_parse_fsid);
/*
* ceph options
*/
enum {
Opt_osdkeepalivetimeout,
Opt_mount_timeout,
Opt_osd_idle_ttl,
Opt_osd_request_timeout,
/* int args above */
Opt_fsid,
Opt_name,
Opt_secret,
Opt_key,
Opt_ip,
Opt_crush_location,
Opt_read_from_replica,
Opt_ms_mode,
/* string args above */
Opt_share,
Opt_crc,
Opt_cephx_require_signatures,
Opt_cephx_sign_messages,
Opt_tcp_nodelay,
Opt_abort_on_full,
Opt_rxbounce,
};
enum {
Opt_read_from_replica_no,
Opt_read_from_replica_balance,
Opt_read_from_replica_localize,
};
static const struct constant_table ceph_param_read_from_replica[] = {
{"no", Opt_read_from_replica_no},
{"balance", Opt_read_from_replica_balance},
{"localize", Opt_read_from_replica_localize},
{}
};
enum ceph_ms_mode {
Opt_ms_mode_legacy,
Opt_ms_mode_crc,
Opt_ms_mode_secure,
Opt_ms_mode_prefer_crc,
Opt_ms_mode_prefer_secure
};
static const struct constant_table ceph_param_ms_mode[] = {
{"legacy", Opt_ms_mode_legacy},
{"crc", Opt_ms_mode_crc},
{"secure", Opt_ms_mode_secure},
{"prefer-crc", Opt_ms_mode_prefer_crc},
{"prefer-secure", Opt_ms_mode_prefer_secure},
{}
};
static const struct fs_parameter_spec ceph_parameters[] = {
fsparam_flag ("abort_on_full", Opt_abort_on_full),
__fsparam (NULL, "cephx_require_signatures", Opt_cephx_require_signatures,
fs_param_neg_with_no|fs_param_deprecated, NULL),
fsparam_flag_no ("cephx_sign_messages", Opt_cephx_sign_messages),
fsparam_flag_no ("crc", Opt_crc),
fsparam_string ("crush_location", Opt_crush_location),
fsparam_string ("fsid", Opt_fsid),
fsparam_string ("ip", Opt_ip),
fsparam_string ("key", Opt_key),
fsparam_u32 ("mount_timeout", Opt_mount_timeout),
fsparam_string ("name", Opt_name),
fsparam_u32 ("osd_idle_ttl", Opt_osd_idle_ttl),
fsparam_u32 ("osd_request_timeout", Opt_osd_request_timeout),
fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout),
fsparam_enum ("read_from_replica", Opt_read_from_replica,
ceph_param_read_from_replica),
fsparam_flag ("rxbounce", Opt_rxbounce),
fsparam_enum ("ms_mode", Opt_ms_mode,
ceph_param_ms_mode),
fsparam_string ("secret", Opt_secret),
fsparam_flag_no ("share", Opt_share),
fsparam_flag_no ("tcp_nodelay", Opt_tcp_nodelay),
{}
};
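/*
* The table above is handed to __fs_parse() in ceph_parse_param() below.
* An illustrative (made-up) option string it would accept:
*
* name=admin,ms_mode=prefer-crc,read_from_replica=balance,osdkeepalive=10,noshare
*/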
struct ceph_options *ceph_alloc_options(void)
{
struct ceph_options *opt;
opt = kzalloc(sizeof(*opt), GFP_KERNEL);
if (!opt)
return NULL;
opt->crush_locs = RB_ROOT;
opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr),
GFP_KERNEL);
if (!opt->mon_addr) {
kfree(opt);
return NULL;
}
opt->flags = CEPH_OPT_DEFAULT;
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
opt->read_from_replica = CEPH_READ_FROM_REPLICA_DEFAULT;
opt->con_modes[0] = CEPH_CON_MODE_UNKNOWN;
opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN;
return opt;
}
EXPORT_SYMBOL(ceph_alloc_options);
void ceph_destroy_options(struct ceph_options *opt)
{
dout("destroy_options %p\n", opt);
if (!opt)
return;
ceph_clear_crush_locs(&opt->crush_locs);
kfree(opt->name);
if (opt->key) {
ceph_crypto_key_destroy(opt->key);
kfree(opt->key);
}
kfree(opt->mon_addr);
kfree(opt);
}
EXPORT_SYMBOL(ceph_destroy_options);
/* get secret from key store */
static int get_secret(struct ceph_crypto_key *dst, const char *name,
struct p_log *log)
{
struct key *ukey;
int key_err;
int err = 0;
struct ceph_crypto_key *ckey;
ukey = request_key(&key_type_ceph, name, NULL);
if (IS_ERR(ukey)) {
/* request_key errors don't map nicely to mount(2)
errors; don't even try, but still printk */
key_err = PTR_ERR(ukey);
switch (key_err) {
case -ENOKEY:
error_plog(log, "Failed due to key not found: %s",
name);
break;
case -EKEYEXPIRED:
error_plog(log, "Failed due to expired key: %s",
name);
break;
case -EKEYREVOKED:
error_plog(log, "Failed due to revoked key: %s",
name);
break;
default:
error_plog(log, "Failed due to key error %d: %s",
key_err, name);
}
err = -EPERM;
goto out;
}
ckey = ukey->payload.data[0];
err = ceph_crypto_key_clone(dst, ckey);
if (err)
goto out_key;
/* pass through, err is 0 */
out_key:
key_put(ukey);
out:
return err;
}
int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
struct fc_log *l, char delim)
{
struct p_log log = {.prefix = "libceph", .log = l};
int ret;
/* ip1[:port1][<delim>ip2[:port2]...] */
ret = ceph_parse_ips(buf, buf + len, opt->mon_addr, CEPH_MAX_MON,
&opt->num_mon, delim);
if (ret) {
error_plog(&log, "Failed to parse monitor IPs: %d", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ceph_parse_mon_ips);
int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
struct fc_log *l)
{
struct fs_parse_result result;
int token, err;
struct p_log log = {.prefix = "libceph", .log = l};
token = __fs_parse(&log, ceph_parameters, param, &result);
dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
if (token < 0)
return token;
switch (token) {
case Opt_ip:
err = ceph_parse_ips(param->string,
param->string + param->size,
&opt->my_addr, 1, NULL, ',');
if (err) {
error_plog(&log, "Failed to parse ip: %d", err);
return err;
}
opt->flags |= CEPH_OPT_MYIP;
break;
case Opt_fsid:
err = ceph_parse_fsid(param->string, &opt->fsid);
if (err) {
error_plog(&log, "Failed to parse fsid: %d", err);
return err;
}
opt->flags |= CEPH_OPT_FSID;
break;
case Opt_name:
kfree(opt->name);
opt->name = param->string;
param->string = NULL;
break;
case Opt_secret:
ceph_crypto_key_destroy(opt->key);
kfree(opt->key);
opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
if (!opt->key)
return -ENOMEM;
err = ceph_crypto_key_unarmor(opt->key, param->string);
if (err) {
error_plog(&log, "Failed to parse secret: %d", err);
return err;
}
break;
case Opt_key:
ceph_crypto_key_destroy(opt->key);
kfree(opt->key);
opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
if (!opt->key)
return -ENOMEM;
return get_secret(opt->key, param->string, &log);
case Opt_crush_location:
ceph_clear_crush_locs(&opt->crush_locs);
err = ceph_parse_crush_location(param->string,
&opt->crush_locs);
if (err) {
error_plog(&log, "Failed to parse CRUSH location: %d",
err);
return err;
}
break;
case Opt_read_from_replica:
switch (result.uint_32) {
case Opt_read_from_replica_no:
opt->read_from_replica = 0;
break;
case Opt_read_from_replica_balance:
opt->read_from_replica = CEPH_OSD_FLAG_BALANCE_READS;
break;
case Opt_read_from_replica_localize:
opt->read_from_replica = CEPH_OSD_FLAG_LOCALIZE_READS;
break;
default:
BUG();
}
break;
case Opt_ms_mode:
switch (result.uint_32) {
case Opt_ms_mode_legacy:
opt->con_modes[0] = CEPH_CON_MODE_UNKNOWN;
opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN;
break;
case Opt_ms_mode_crc:
opt->con_modes[0] = CEPH_CON_MODE_CRC;
opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN;
break;
case Opt_ms_mode_secure:
opt->con_modes[0] = CEPH_CON_MODE_SECURE;
opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN;
break;
case Opt_ms_mode_prefer_crc:
opt->con_modes[0] = CEPH_CON_MODE_CRC;
opt->con_modes[1] = CEPH_CON_MODE_SECURE;
break;
case Opt_ms_mode_prefer_secure:
opt->con_modes[0] = CEPH_CON_MODE_SECURE;
opt->con_modes[1] = CEPH_CON_MODE_CRC;
break;
default:
BUG();
}
break;
case Opt_osdkeepalivetimeout:
/* 0 isn't well defined right now, reject it */
if (result.uint_32 < 1 || result.uint_32 > INT_MAX / 1000)
goto out_of_range;
opt->osd_keepalive_timeout =
msecs_to_jiffies(result.uint_32 * 1000);
break;
case Opt_osd_idle_ttl:
/* 0 isn't well defined right now, reject it */
if (result.uint_32 < 1 || result.uint_32 > INT_MAX / 1000)
goto out_of_range;
opt->osd_idle_ttl = msecs_to_jiffies(result.uint_32 * 1000);
break;
case Opt_mount_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
if (result.uint_32 > INT_MAX / 1000)
goto out_of_range;
opt->mount_timeout = msecs_to_jiffies(result.uint_32 * 1000);
break;
case Opt_osd_request_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
if (result.uint_32 > INT_MAX / 1000)
goto out_of_range;
opt->osd_request_timeout =
msecs_to_jiffies(result.uint_32 * 1000);
break;
case Opt_share:
if (!result.negated)
opt->flags &= ~CEPH_OPT_NOSHARE;
else
opt->flags |= CEPH_OPT_NOSHARE;
break;
case Opt_crc:
if (!result.negated)
opt->flags &= ~CEPH_OPT_NOCRC;
else
opt->flags |= CEPH_OPT_NOCRC;
break;
case Opt_cephx_require_signatures:
if (!result.negated)
warn_plog(&log, "Ignoring cephx_require_signatures");
else
warn_plog(&log, "Ignoring nocephx_require_signatures, use nocephx_sign_messages");
break;
case Opt_cephx_sign_messages:
if (!result.negated)
opt->flags &= ~CEPH_OPT_NOMSGSIGN;
else
opt->flags |= CEPH_OPT_NOMSGSIGN;
break;
case Opt_tcp_nodelay:
if (!result.negated)
opt->flags |= CEPH_OPT_TCP_NODELAY;
else
opt->flags &= ~CEPH_OPT_TCP_NODELAY;
break;
case Opt_abort_on_full:
opt->flags |= CEPH_OPT_ABORT_ON_FULL;
break;
case Opt_rxbounce:
opt->flags |= CEPH_OPT_RXBOUNCE;
break;
default:
BUG();
}
return 0;
out_of_range:
return inval_plog(&log, "%s out of range", param->key);
}
EXPORT_SYMBOL(ceph_parse_param);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
bool show_all)
{
struct ceph_options *opt = client->options;
size_t pos = m->count;
struct rb_node *n;
if (opt->name) {
seq_puts(m, "name=");
seq_escape(m, opt->name, ", \t\n\\");
seq_putc(m, ',');
}
if (opt->key)
seq_puts(m, "secret=<hidden>,");
if (!RB_EMPTY_ROOT(&opt->crush_locs)) {
seq_puts(m, "crush_location=");
for (n = rb_first(&opt->crush_locs); ; ) {
struct crush_loc_node *loc =
rb_entry(n, struct crush_loc_node, cl_node);
seq_printf(m, "%s:%s", loc->cl_loc.cl_type_name,
loc->cl_loc.cl_name);
n = rb_next(n);
if (!n)
break;
seq_putc(m, '|');
}
seq_putc(m, ',');
}
if (opt->read_from_replica == CEPH_OSD_FLAG_BALANCE_READS) {
seq_puts(m, "read_from_replica=balance,");
} else if (opt->read_from_replica == CEPH_OSD_FLAG_LOCALIZE_READS) {
seq_puts(m, "read_from_replica=localize,");
}
if (opt->con_modes[0] != CEPH_CON_MODE_UNKNOWN) {
if (opt->con_modes[0] == CEPH_CON_MODE_CRC &&
opt->con_modes[1] == CEPH_CON_MODE_UNKNOWN) {
seq_puts(m, "ms_mode=crc,");
} else if (opt->con_modes[0] == CEPH_CON_MODE_SECURE &&
opt->con_modes[1] == CEPH_CON_MODE_UNKNOWN) {
seq_puts(m, "ms_mode=secure,");
} else if (opt->con_modes[0] == CEPH_CON_MODE_CRC &&
opt->con_modes[1] == CEPH_CON_MODE_SECURE) {
seq_puts(m, "ms_mode=prefer-crc,");
} else if (opt->con_modes[0] == CEPH_CON_MODE_SECURE &&
opt->con_modes[1] == CEPH_CON_MODE_CRC) {
seq_puts(m, "ms_mode=prefer-secure,");
}
}
if (opt->flags & CEPH_OPT_FSID)
seq_printf(m, "fsid=%pU,", &opt->fsid);
if (opt->flags & CEPH_OPT_NOSHARE)
seq_puts(m, "noshare,");
if (opt->flags & CEPH_OPT_NOCRC)
seq_puts(m, "nocrc,");
if (opt->flags & CEPH_OPT_NOMSGSIGN)
seq_puts(m, "nocephx_sign_messages,");
if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
seq_puts(m, "notcp_nodelay,");
if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
seq_puts(m, "abort_on_full,");
if (opt->flags & CEPH_OPT_RXBOUNCE)
seq_puts(m, "rxbounce,");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, "mount_timeout=%d,",
jiffies_to_msecs(opt->mount_timeout) / 1000);
if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
seq_printf(m, "osd_idle_ttl=%d,",
jiffies_to_msecs(opt->osd_idle_ttl) / 1000);
if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
seq_printf(m, "osdkeepalivetimeout=%d,",
jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000);
if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT)
seq_printf(m, "osd_request_timeout=%d,",
jiffies_to_msecs(opt->osd_request_timeout) / 1000);
/* drop redundant comma */
if (m->count != pos)
m->count--;
return 0;
}
EXPORT_SYMBOL(ceph_print_client_options);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client)
{
return &client->msgr.inst.addr;
}
EXPORT_SYMBOL(ceph_client_addr);
u64 ceph_client_gid(struct ceph_client *client)
{
return client->monc.auth->global_id;
}
EXPORT_SYMBOL(ceph_client_gid);
/*
* create a fresh client instance
*/
struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
{
struct ceph_client *client;
struct ceph_entity_addr *myaddr = NULL;
int err;
err = wait_for_random_bytes();
if (err < 0)
return ERR_PTR(err);
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (client == NULL)
return ERR_PTR(-ENOMEM);
client->private = private;
client->options = opt;
mutex_init(&client->mount_mutex);
init_waitqueue_head(&client->auth_wq);
client->auth_err = 0;
client->extra_mon_dispatch = NULL;
client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT;
if (!ceph_test_opt(client, NOMSGSIGN))
client->required_features |= CEPH_FEATURE_MSG_AUTH;
/* msgr */
if (ceph_test_opt(client, MYIP))
myaddr = &client->options->my_addr;
ceph_messenger_init(&client->msgr, myaddr);
/* subsystems */
err = ceph_monc_init(&client->monc, client);
if (err < 0)
goto fail;
err = ceph_osdc_init(&client->osdc, client);
if (err < 0)
goto fail_monc;
return client;
fail_monc:
ceph_monc_stop(&client->monc);
fail:
ceph_messenger_fini(&client->msgr);
kfree(client);
return ERR_PTR(err);
}
EXPORT_SYMBOL(ceph_create_client);
void ceph_destroy_client(struct ceph_client *client)
{
dout("destroy_client %p\n", client);
atomic_set(&client->msgr.stopping, 1);
/* unmount */
ceph_osdc_stop(&client->osdc);
ceph_monc_stop(&client->monc);
ceph_messenger_fini(&client->msgr);
ceph_debugfs_client_cleanup(client);
ceph_destroy_options(client->options);
kfree(client);
dout("destroy_client %p done\n", client);
}
EXPORT_SYMBOL(ceph_destroy_client);
void ceph_reset_client_addr(struct ceph_client *client)
{
ceph_messenger_reset_nonce(&client->msgr);
ceph_monc_reopen_session(&client->monc);
ceph_osdc_reopen_osds(&client->osdc);
}
EXPORT_SYMBOL(ceph_reset_client_addr);
/*
* true if we have both the mon and osd maps (and have thus joined
* the cluster)
*/
static bool have_mon_and_osd_map(struct ceph_client *client)
{
return client->monc.monmap && client->monc.monmap->epoch &&
client->osdc.osdmap && client->osdc.osdmap->epoch;
}
/*
* mount: join the ceph cluster, and open root directory.
*/
int __ceph_open_session(struct ceph_client *client, unsigned long started)
{
unsigned long timeout = client->options->mount_timeout;
long err;
/* open session, and wait for mon and osd maps */
err = ceph_monc_open_session(&client->monc);
if (err < 0)
return err;
while (!have_mon_and_osd_map(client)) {
if (timeout && time_after_eq(jiffies, started + timeout))
return -ETIMEDOUT;
/* wait */
dout("mount waiting for mon_map\n");
err = wait_event_interruptible_timeout(client->auth_wq,
have_mon_and_osd_map(client) || (client->auth_err < 0),
ceph_timeout_jiffies(timeout));
if (err < 0)
return err;
if (client->auth_err < 0)
return client->auth_err;
}
pr_info("client%llu fsid %pU\n", ceph_client_gid(client),
&client->fsid);
ceph_debugfs_client_init(client);
return 0;
}
EXPORT_SYMBOL(__ceph_open_session);
int ceph_open_session(struct ceph_client *client)
{
int ret;
unsigned long started = jiffies; /* note the start time */
dout("open_session start\n");
mutex_lock(&client->mount_mutex);
ret = __ceph_open_session(client, started);
mutex_unlock(&client->mount_mutex);
return ret;
}
EXPORT_SYMBOL(ceph_open_session);
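/*
* Ask the monitors for the newest osdmap epoch and, if our local map is
* older, request it and wait (up to @timeout) for it to arrive.
*/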
int ceph_wait_for_latest_osdmap(struct ceph_client *client,
unsigned long timeout)
{
u64 newest_epoch;
int ret;
ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
if (ret)
return ret;
if (client->osdc.osdmap->epoch >= newest_epoch)
return 0;
ceph_osdc_maybe_request_map(&client->osdc);
return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
}
EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
static int __init init_ceph_lib(void)
{
int ret = 0;
ceph_debugfs_init();
ret = ceph_crypto_init();
if (ret < 0)
goto out_debugfs;
ret = ceph_msgr_init();
if (ret < 0)
goto out_crypto;
ret = ceph_osdc_setup();
if (ret < 0)
goto out_msgr;
pr_info("loaded (mon/osd proto %d/%d)\n",
CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL);
return 0;
out_msgr:
ceph_msgr_exit();
out_crypto:
ceph_crypto_shutdown();
out_debugfs:
ceph_debugfs_cleanup();
return ret;
}
static void __exit exit_ceph_lib(void)
{
dout("exit_ceph_lib\n");
WARN_ON(!ceph_strings_empty());
ceph_osdc_cleanup();
ceph_msgr_exit();
ceph_crypto_shutdown();
ceph_debugfs_cleanup();
}
module_init(init_ceph_lib);
module_exit(exit_ceph_lib);
MODULE_AUTHOR("Sage Weil <[email protected]>");
MODULE_AUTHOR("Yehuda Sadeh <[email protected]>");
MODULE_AUTHOR("Patience Warnick <[email protected]>");
MODULE_DESCRIPTION("Ceph core library");
MODULE_LICENSE("GPL");
| linux-master | net/ceph/ceph_common.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/inet.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h> /* for ceph_pr_addr() */
static int
ceph_decode_entity_addr_versioned(void **p, void *end,
struct ceph_entity_addr *addr)
{
int ret;
u8 struct_v;
u32 struct_len, addr_len;
void *struct_end;
ret = ceph_start_decoding(p, end, 1, "entity_addr_t", &struct_v,
&struct_len);
if (ret)
goto bad;
ret = -EINVAL;
struct_end = *p + struct_len;
ceph_decode_copy_safe(p, end, &addr->type, sizeof(addr->type), bad);
ceph_decode_copy_safe(p, end, &addr->nonce, sizeof(addr->nonce), bad);
ceph_decode_32_safe(p, end, addr_len, bad);
if (addr_len > sizeof(addr->in_addr))
goto bad;
memset(&addr->in_addr, 0, sizeof(addr->in_addr));
if (addr_len) {
ceph_decode_copy_safe(p, end, &addr->in_addr, addr_len, bad);
addr->in_addr.ss_family =
le16_to_cpu((__force __le16)addr->in_addr.ss_family);
}
/* Advance past anything the client doesn't yet understand */
*p = struct_end;
ret = 0;
bad:
return ret;
}
static int
ceph_decode_entity_addr_legacy(void **p, void *end,
struct ceph_entity_addr *addr)
{
int ret = -EINVAL;
/* Skip rest of type field */
ceph_decode_skip_n(p, end, 3, bad);
/*
* Clients that don't support ADDR2 always send TYPE_NONE, change it
* to TYPE_LEGACY for forward compatibility.
*/
addr->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
ceph_decode_copy_safe(p, end, &addr->nonce, sizeof(addr->nonce), bad);
memset(&addr->in_addr, 0, sizeof(addr->in_addr));
ceph_decode_copy_safe(p, end, &addr->in_addr,
sizeof(addr->in_addr), bad);
addr->in_addr.ss_family =
be16_to_cpu((__force __be16)addr->in_addr.ss_family);
ret = 0;
bad:
return ret;
}
int
ceph_decode_entity_addr(void **p, void *end, struct ceph_entity_addr *addr)
{
u8 marker;
ceph_decode_8_safe(p, end, marker, bad);
if (marker == 1)
return ceph_decode_entity_addr_versioned(p, end, addr);
else if (marker == 0)
return ceph_decode_entity_addr_legacy(p, end, addr);
bad:
return -EINVAL;
}
EXPORT_SYMBOL(ceph_decode_entity_addr);
/*
* Return addr of desired type (MSGR2 or LEGACY) or error.
* Make sure there is only one match.
*
* Assume encoding with MSG_ADDR2.
*/
int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
struct ceph_entity_addr *addr)
{
__le32 my_type = msgr2 ? CEPH_ENTITY_ADDR_TYPE_MSGR2 :
CEPH_ENTITY_ADDR_TYPE_LEGACY;
struct ceph_entity_addr tmp_addr;
int addr_cnt;
bool found;
u8 marker;
int ret;
int i;
ceph_decode_8_safe(p, end, marker, e_inval);
if (marker != 2) {
pr_err("bad addrvec marker %d\n", marker);
return -EINVAL;
}
ceph_decode_32_safe(p, end, addr_cnt, e_inval);
dout("%s addr_cnt %d\n", __func__, addr_cnt);
found = false;
for (i = 0; i < addr_cnt; i++) {
ret = ceph_decode_entity_addr(p, end, &tmp_addr);
if (ret)
return ret;
dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
if (tmp_addr.type == my_type) {
if (found) {
pr_err("another match of type %d in addrvec\n",
le32_to_cpu(my_type));
return -EINVAL;
}
memcpy(addr, &tmp_addr, sizeof(*addr));
found = true;
}
}
if (found)
return 0;
if (!addr_cnt)
return 0; /* normal -- e.g. unused OSD id/slot */
if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
return 0; /* weird but effectively the same as !addr_cnt */
pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
return -ENOENT;
e_inval:
return -EINVAL;
}
EXPORT_SYMBOL(ceph_decode_entity_addrvec);
static int get_sockaddr_encoding_len(sa_family_t family)
{
union {
struct sockaddr sa;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
} u;
switch (family) {
case AF_INET:
return sizeof(u.sin);
case AF_INET6:
return sizeof(u.sin6);
default:
return sizeof(u);
}
}
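/*
* On-wire size of an entity_addr_t as produced by ceph_encode_entity_addr()
* below: 1 byte marker, the versioned encoding header
* (CEPH_ENCODING_START_BLK_LEN), 4 bytes type, 4 bytes nonce, a 4 byte
* sockaddr length and then the sockaddr itself (family + address bytes).
*/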
int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr)
{
sa_family_t family = get_unaligned(&addr->in_addr.ss_family);
int addr_len = get_sockaddr_encoding_len(family);
return 1 + CEPH_ENCODING_START_BLK_LEN + 4 + 4 + 4 + addr_len;
}
void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr)
{
sa_family_t family = get_unaligned(&addr->in_addr.ss_family);
int addr_len = get_sockaddr_encoding_len(family);
ceph_encode_8(p, 1); /* marker */
ceph_start_encoding(p, 1, 1, sizeof(addr->type) +
sizeof(addr->nonce) +
sizeof(u32) + addr_len);
ceph_encode_copy(p, &addr->type, sizeof(addr->type));
ceph_encode_copy(p, &addr->nonce, sizeof(addr->nonce));
ceph_encode_32(p, addr_len);
ceph_encode_16(p, family);
ceph_encode_copy(p, addr->in_addr.__data, addr_len - sizeof(family));
}
| linux-master | net/ceph/decode.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
int ceph_armor(char *dst, const char *src, const char *end);
int ceph_unarmor(char *dst, const char *src, const char *end);
/*
* base64 encode/decode.
*/
static const char *pem_key =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static int encode_bits(int c)
{
return pem_key[c];
}
static int decode_bits(char c)
{
if (c >= 'A' && c <= 'Z')
return c - 'A';
if (c >= 'a' && c <= 'z')
return c - 'a' + 26;
if (c >= '0' && c <= '9')
return c - '0' + 52;
if (c == '+')
return 62;
if (c == '/')
return 63;
if (c == '=')
return 0; /* just non-negative, please */
return -EINVAL;
}
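/*
* ceph_armor() below emits standard base64 (with '=' padding) and inserts
* a '\n' after every 64 output characters; ceph_unarmor() skips those
* newlines when decoding. Rough round-trip example (illustrative):
*
* "key" -> "a2V5" -> "key"
*/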
int ceph_armor(char *dst, const char *src, const char *end)
{
int olen = 0;
int line = 0;
while (src < end) {
unsigned char a, b, c;
a = *src++;
*dst++ = encode_bits(a >> 2);
if (src < end) {
b = *src++;
*dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
if (src < end) {
c = *src++;
*dst++ = encode_bits(((b & 15) << 2) |
(c >> 6));
*dst++ = encode_bits(c & 63);
} else {
*dst++ = encode_bits((b & 15) << 2);
*dst++ = '=';
}
} else {
*dst++ = encode_bits(((a & 3) << 4));
*dst++ = '=';
*dst++ = '=';
}
olen += 4;
line += 4;
if (line == 64) {
line = 0;
*(dst++) = '\n';
olen++;
}
}
return olen;
}
int ceph_unarmor(char *dst, const char *src, const char *end)
{
int olen = 0;
while (src < end) {
int a, b, c, d;
if (src[0] == '\n') {
src++;
continue;
}
if (src + 4 > end)
return -EINVAL;
a = decode_bits(src[0]);
b = decode_bits(src[1]);
c = decode_bits(src[2]);
d = decode_bits(src[3]);
if (a < 0 || b < 0 || c < 0 || d < 0)
return -EINVAL;
*dst++ = (a << 2) | (b >> 4);
if (src[2] == '=')
return olen + 1;
*dst++ = ((b & 15) << 4) | (c >> 2);
if (src[3] == '=')
return olen + 2;
*dst++ = ((c & 3) << 6) | d;
olen += 3;
src += 4;
}
return olen;
}
| linux-master | net/ceph/armor.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
/*
* If @buf is NULL, discard up to @len bytes.
*/
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
struct kvec iov = {buf, len};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
int r;
if (!buf)
msg.msg_flags |= MSG_TRUNC;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
return r;
}
static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
int page_offset, size_t length)
{
struct bio_vec bvec;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
int r;
BUG_ON(page_offset + length > PAGE_SIZE);
bvec_set_page(&bvec, page, length, page_offset);
iov_iter_bvec(&msg.msg_iter, ITER_DEST, &bvec, 1, length);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
return r;
}
/*
* write something. @more is true if caller will be sending more data
* shortly.
*/
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
size_t kvlen, size_t len, bool more)
{
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
int r;
if (more)
msg.msg_flags |= MSG_MORE;
else
msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
if (r == -EAGAIN)
r = 0;
return r;
}
/*
* @more: MSG_MORE or 0.
*/
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int more)
{
struct msghdr msg = {
.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | more,
};
struct bio_vec bvec;
int ret;
/*
* MSG_SPLICE_PAGES cannot properly handle pages with page_count == 0,
* we need to fall back to sendmsg if that's the case.
*
* Same goes for slab pages: skb_can_coalesce() allows
* coalescing neighboring slab objects into a single frag which
* triggers one of hardened usercopy checks.
*/
if (sendpage_ok(page))
msg.msg_flags |= MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page, size, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
ret = sock_sendmsg(sock, &msg);
if (ret == -EAGAIN)
ret = 0;
return ret;
}
static void con_out_kvec_reset(struct ceph_connection *con)
{
BUG_ON(con->v1.out_skip);
con->v1.out_kvec_left = 0;
con->v1.out_kvec_bytes = 0;
con->v1.out_kvec_cur = &con->v1.out_kvec[0];
}
static void con_out_kvec_add(struct ceph_connection *con,
size_t size, void *data)
{
int index = con->v1.out_kvec_left;
BUG_ON(con->v1.out_skip);
BUG_ON(index >= ARRAY_SIZE(con->v1.out_kvec));
con->v1.out_kvec[index].iov_len = size;
con->v1.out_kvec[index].iov_base = data;
con->v1.out_kvec_left++;
con->v1.out_kvec_bytes += size;
}
/*
* Chop off a kvec from the end. Return residual number of bytes for
* that kvec, i.e. how many bytes would have been written if the kvec
* hadn't been nuked.
*/
static int con_out_kvec_skip(struct ceph_connection *con)
{
int skip = 0;
if (con->v1.out_kvec_bytes > 0) {
skip = con->v1.out_kvec_cur[con->v1.out_kvec_left - 1].iov_len;
BUG_ON(con->v1.out_kvec_bytes < skip);
BUG_ON(!con->v1.out_kvec_left);
con->v1.out_kvec_bytes -= skip;
con->v1.out_kvec_left--;
}
return skip;
}
static size_t sizeof_footer(struct ceph_connection *con)
{
return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
sizeof(struct ceph_msg_footer) :
sizeof(struct ceph_msg_footer_old);
}
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
/* Initialize data cursor if it's not a sparse read */
if (!msg->sparse_read)
ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
}
/*
* Prepare footer for currently outgoing message, and finish things
* off. Assumes out_kvec* are already valid; we just add on to the end.
*/
static void prepare_write_message_footer(struct ceph_connection *con)
{
struct ceph_msg *m = con->out_msg;
m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
dout("prepare_write_message_footer %p\n", con);
con_out_kvec_add(con, sizeof_footer(con), &m->footer);
if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
if (con->ops->sign_message)
con->ops->sign_message(m);
else
m->footer.sig = 0;
} else {
m->old_footer.flags = m->footer.flags;
}
con->v1.out_more = m->more_to_follow;
con->v1.out_msg_done = true;
}
/*
* Prepare headers for the next outgoing message.
*/
static void prepare_write_message(struct ceph_connection *con)
{
struct ceph_msg *m;
u32 crc;
con_out_kvec_reset(con);
con->v1.out_msg_done = false;
/* Sneak an ack in there first? If we can get it into the same
* TCP packet that's a good thing. */
if (con->in_seq > con->in_seq_acked) {
con->in_seq_acked = con->in_seq;
con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->v1.out_temp_ack = cpu_to_le64(con->in_seq_acked);
con_out_kvec_add(con, sizeof(con->v1.out_temp_ack),
&con->v1.out_temp_ack);
}
ceph_con_get_out_msg(con);
m = con->out_msg;
dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
m, con->out_seq, le16_to_cpu(m->hdr.type),
le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
m->data_length);
WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
/* tag + hdr + front + middle */
con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
con_out_kvec_add(con, sizeof(con->v1.out_hdr), &con->v1.out_hdr);
con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
if (m->middle)
con_out_kvec_add(con, m->middle->vec.iov_len,
m->middle->vec.iov_base);
/* fill in hdr crc and finalize hdr */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
con->out_msg->hdr.crc = cpu_to_le32(crc);
memcpy(&con->v1.out_hdr, &con->out_msg->hdr, sizeof(con->v1.out_hdr));
/* fill in front and middle crc, footer */
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
con->out_msg->footer.front_crc = cpu_to_le32(crc);
if (m->middle) {
crc = crc32c(0, m->middle->vec.iov_base,
m->middle->vec.iov_len);
con->out_msg->footer.middle_crc = cpu_to_le32(crc);
} else
con->out_msg->footer.middle_crc = 0;
dout("%s front_crc %u middle_crc %u\n", __func__,
le32_to_cpu(con->out_msg->footer.front_crc),
le32_to_cpu(con->out_msg->footer.middle_crc));
con->out_msg->footer.flags = 0;
/* is there a data payload? */
con->out_msg->footer.data_crc = 0;
if (m->data_length) {
prepare_message_data(con->out_msg, m->data_length);
con->v1.out_more = 1; /* data + footer will follow */
} else {
/* no, queue up footer too and be done */
prepare_write_message_footer(con);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
/*
* Prepare an ack.
*/
static void prepare_write_ack(struct ceph_connection *con)
{
dout("prepare_write_ack %p %llu -> %llu\n", con,
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
con_out_kvec_reset(con);
con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->v1.out_temp_ack = cpu_to_le64(con->in_seq_acked);
con_out_kvec_add(con, sizeof(con->v1.out_temp_ack),
&con->v1.out_temp_ack);
con->v1.out_more = 1; /* more will follow.. eventually.. */
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
/*
* Prepare to share the seq during handshake
*/
static void prepare_write_seq(struct ceph_connection *con)
{
dout("prepare_write_seq %p %llu -> %llu\n", con,
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
con_out_kvec_reset(con);
con->v1.out_temp_ack = cpu_to_le64(con->in_seq_acked);
con_out_kvec_add(con, sizeof(con->v1.out_temp_ack),
&con->v1.out_temp_ack);
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
/*
* Prepare to write keepalive byte.
*/
static void prepare_write_keepalive(struct ceph_connection *con)
{
dout("prepare_write_keepalive %p\n", con);
con_out_kvec_reset(con);
if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
struct timespec64 now;
ktime_get_real_ts64(&now);
con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
ceph_encode_timespec64(&con->v1.out_temp_keepalive2, &now);
con_out_kvec_add(con, sizeof(con->v1.out_temp_keepalive2),
&con->v1.out_temp_keepalive2);
} else {
con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
/*
* Connection negotiation.
*/
static int get_connect_authorizer(struct ceph_connection *con)
{
struct ceph_auth_handshake *auth;
int auth_proto;
if (!con->ops->get_authorizer) {
con->v1.auth = NULL;
con->v1.out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
con->v1.out_connect.authorizer_len = 0;
return 0;
}
auth = con->ops->get_authorizer(con, &auth_proto, con->v1.auth_retry);
if (IS_ERR(auth))
return PTR_ERR(auth);
con->v1.auth = auth;
con->v1.out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
con->v1.out_connect.authorizer_len =
cpu_to_le32(auth->authorizer_buf_len);
return 0;
}
/*
* We connected to a peer and are saying hello.
*/
static void prepare_write_banner(struct ceph_connection *con)
{
con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
&con->msgr->my_enc_addr);
con->v1.out_more = 0;
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
static void __prepare_write_connect(struct ceph_connection *con)
{
con_out_kvec_add(con, sizeof(con->v1.out_connect),
&con->v1.out_connect);
if (con->v1.auth)
con_out_kvec_add(con, con->v1.auth->authorizer_buf_len,
con->v1.auth->authorizer_buf);
con->v1.out_more = 0;
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
static int prepare_write_connect(struct ceph_connection *con)
{
unsigned int global_seq = ceph_get_global_seq(con->msgr, 0);
int proto;
int ret;
switch (con->peer_name.type) {
case CEPH_ENTITY_TYPE_MON:
proto = CEPH_MONC_PROTOCOL;
break;
case CEPH_ENTITY_TYPE_OSD:
proto = CEPH_OSDC_PROTOCOL;
break;
case CEPH_ENTITY_TYPE_MDS:
proto = CEPH_MDSC_PROTOCOL;
break;
default:
BUG();
}
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->v1.connect_seq, global_seq, proto);
con->v1.out_connect.features =
cpu_to_le64(from_msgr(con->msgr)->supported_features);
con->v1.out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->v1.out_connect.connect_seq = cpu_to_le32(con->v1.connect_seq);
con->v1.out_connect.global_seq = cpu_to_le32(global_seq);
con->v1.out_connect.protocol_version = cpu_to_le32(proto);
con->v1.out_connect.flags = 0;
ret = get_connect_authorizer(con);
if (ret)
return ret;
__prepare_write_connect(con);
return 0;
}
/*
* write as much of pending kvecs to the socket as we can.
* 1 -> done
* 0 -> socket full, but more to do
* <0 -> error
*/
static int write_partial_kvec(struct ceph_connection *con)
{
int ret;
dout("write_partial_kvec %p %d left\n", con, con->v1.out_kvec_bytes);
while (con->v1.out_kvec_bytes > 0) {
ret = ceph_tcp_sendmsg(con->sock, con->v1.out_kvec_cur,
con->v1.out_kvec_left,
con->v1.out_kvec_bytes,
con->v1.out_more);
if (ret <= 0)
goto out;
con->v1.out_kvec_bytes -= ret;
if (!con->v1.out_kvec_bytes)
break; /* done */
/* account for full iov entries consumed */
while (ret >= con->v1.out_kvec_cur->iov_len) {
BUG_ON(!con->v1.out_kvec_left);
ret -= con->v1.out_kvec_cur->iov_len;
con->v1.out_kvec_cur++;
con->v1.out_kvec_left--;
}
/* and for a partially-consumed entry */
if (ret) {
con->v1.out_kvec_cur->iov_len -= ret;
con->v1.out_kvec_cur->iov_base += ret;
}
}
con->v1.out_kvec_left = 0;
ret = 1;
out:
dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
con->v1.out_kvec_bytes, con->v1.out_kvec_left, ret);
return ret; /* done! */
}
/*
* Write as much message data payload as we can. If we finish, queue
* up the footer.
* 1 -> done, footer is now queued in out_kvec[].
* 0 -> socket full, but more to do
* <0 -> error
*/
static int write_partial_message_data(struct ceph_connection *con)
{
struct ceph_msg *msg = con->out_msg;
struct ceph_msg_data_cursor *cursor = &msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
u32 crc;
dout("%s %p msg %p\n", __func__, con, msg);
if (!msg->num_data_items)
return -EINVAL;
/*
* Iterate through each page that contains data to be
* written, and send as much as possible for each.
*
* If we are calculating the data crc (the default), we will
* need to map the page. If we have no pages, they have
* been revoked, so use the zero page.
*/
crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
while (cursor->total_resid) {
struct page *page;
size_t page_offset;
size_t length;
int ret;
if (!cursor->resid) {
ceph_msg_data_advance(cursor, 0);
continue;
}
page = ceph_msg_data_next(cursor, &page_offset, &length);
ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
MSG_MORE);
if (ret <= 0) {
if (do_datacrc)
msg->footer.data_crc = cpu_to_le32(crc);
return ret;
}
if (do_datacrc && cursor->need_crc)
crc = ceph_crc32c_page(crc, page, page_offset, length);
ceph_msg_data_advance(cursor, (size_t)ret);
}
dout("%s %p msg %p done\n", __func__, con, msg);
/* prepare and queue up footer, too */
if (do_datacrc)
msg->footer.data_crc = cpu_to_le32(crc);
else
msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
con_out_kvec_reset(con);
prepare_write_message_footer(con);
return 1; /* must return > 0 to indicate success */
}
/*
* write some zeros
*/
static int write_partial_skip(struct ceph_connection *con)
{
int ret;
dout("%s %p %d left\n", __func__, con, con->v1.out_skip);
while (con->v1.out_skip > 0) {
size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);
ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
MSG_MORE);
if (ret <= 0)
goto out;
con->v1.out_skip -= ret;
}
ret = 1;
out:
return ret;
}
/*
* Prepare to read connection handshake, or an ack.
*/
static void prepare_read_banner(struct ceph_connection *con)
{
dout("prepare_read_banner %p\n", con);
con->v1.in_base_pos = 0;
}
static void prepare_read_connect(struct ceph_connection *con)
{
dout("prepare_read_connect %p\n", con);
con->v1.in_base_pos = 0;
}
static void prepare_read_ack(struct ceph_connection *con)
{
dout("prepare_read_ack %p\n", con);
con->v1.in_base_pos = 0;
}
static void prepare_read_seq(struct ceph_connection *con)
{
dout("prepare_read_seq %p\n", con);
con->v1.in_base_pos = 0;
con->v1.in_tag = CEPH_MSGR_TAG_SEQ;
}
static void prepare_read_tag(struct ceph_connection *con)
{
dout("prepare_read_tag %p\n", con);
con->v1.in_base_pos = 0;
con->v1.in_tag = CEPH_MSGR_TAG_READY;
}
static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
dout("prepare_read_keepalive_ack %p\n", con);
con->v1.in_base_pos = 0;
}
/*
* Prepare to read a message.
*/
static int prepare_read_message(struct ceph_connection *con)
{
dout("prepare_read_message %p\n", con);
BUG_ON(con->in_msg != NULL);
con->v1.in_base_pos = 0;
con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
return 0;
}
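/*
* Read one fixed-size object of a multi-part exchange: con->v1.in_base_pos
* is the running byte offset across the whole exchange, @end is the offset
* at which this @object finishes and @size is the object's size, so
* (size - (end - in_base_pos)) bytes of it have already been received.
*/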
static int read_partial(struct ceph_connection *con,
int end, int size, void *object)
{
while (con->v1.in_base_pos < end) {
int left = end - con->v1.in_base_pos;
int have = size - left;
int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
if (ret <= 0)
return ret;
con->v1.in_base_pos += ret;
}
return 1;
}
/*
* Read all or part of the connect-side handshake on a new connection
*/
static int read_partial_banner(struct ceph_connection *con)
{
int size;
int end;
int ret;
dout("read_partial_banner %p at %d\n", con, con->v1.in_base_pos);
/* peer's banner */
size = strlen(CEPH_BANNER);
end = size;
ret = read_partial(con, end, size, con->v1.in_banner);
if (ret <= 0)
goto out;
size = sizeof(con->v1.actual_peer_addr);
end += size;
ret = read_partial(con, end, size, &con->v1.actual_peer_addr);
if (ret <= 0)
goto out;
ceph_decode_banner_addr(&con->v1.actual_peer_addr);
size = sizeof(con->v1.peer_addr_for_me);
end += size;
ret = read_partial(con, end, size, &con->v1.peer_addr_for_me);
if (ret <= 0)
goto out;
ceph_decode_banner_addr(&con->v1.peer_addr_for_me);
out:
return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
int size;
int end;
int ret;
dout("read_partial_connect %p at %d\n", con, con->v1.in_base_pos);
size = sizeof(con->v1.in_reply);
end = size;
ret = read_partial(con, end, size, &con->v1.in_reply);
if (ret <= 0)
goto out;
if (con->v1.auth) {
size = le32_to_cpu(con->v1.in_reply.authorizer_len);
if (size > con->v1.auth->authorizer_reply_buf_len) {
pr_err("authorizer reply too big: %d > %zu\n", size,
con->v1.auth->authorizer_reply_buf_len);
ret = -EINVAL;
goto out;
}
end += size;
ret = read_partial(con, end, size,
con->v1.auth->authorizer_reply_buf);
if (ret <= 0)
goto out;
}
dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
con, con->v1.in_reply.tag,
le32_to_cpu(con->v1.in_reply.connect_seq),
le32_to_cpu(con->v1.in_reply.global_seq));
out:
return ret;
}
/*
* Verify the hello banner looks okay.
*/
static int verify_hello(struct ceph_connection *con)
{
if (memcmp(con->v1.in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
pr_err("connect to %s got bad banner\n",
ceph_pr_addr(&con->peer_addr));
con->error_msg = "protocol error, bad banner";
return -1;
}
return 0;
}
static int process_banner(struct ceph_connection *con)
{
struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
dout("process_banner on %p\n", con);
if (verify_hello(con) < 0)
return -1;
/*
* Make sure the other end is who we wanted. Note that the other
* end may not yet know their ip address, so if it's 0.0.0.0, give
* them the benefit of the doubt.
*/
if (memcmp(&con->peer_addr, &con->v1.actual_peer_addr,
sizeof(con->peer_addr)) != 0 &&
!(ceph_addr_is_blank(&con->v1.actual_peer_addr) &&
con->v1.actual_peer_addr.nonce == con->peer_addr.nonce)) {
pr_warn("wrong peer, want %s/%u, got %s/%u\n",
ceph_pr_addr(&con->peer_addr),
le32_to_cpu(con->peer_addr.nonce),
ceph_pr_addr(&con->v1.actual_peer_addr),
le32_to_cpu(con->v1.actual_peer_addr.nonce));
con->error_msg = "wrong peer at address";
return -1;
}
/*
* did we learn our address?
*/
if (ceph_addr_is_blank(my_addr)) {
memcpy(&my_addr->in_addr,
&con->v1.peer_addr_for_me.in_addr,
sizeof(con->v1.peer_addr_for_me.in_addr));
ceph_addr_set_port(my_addr, 0);
ceph_encode_my_addr(con->msgr);
dout("process_banner learned my addr is %s\n",
ceph_pr_addr(my_addr));
}
return 0;
}
static int process_connect(struct ceph_connection *con)
{
u64 sup_feat = from_msgr(con->msgr)->supported_features;
u64 req_feat = from_msgr(con->msgr)->required_features;
u64 server_feat = le64_to_cpu(con->v1.in_reply.features);
int ret;
dout("process_connect on %p tag %d\n", con, con->v1.in_tag);
if (con->v1.auth) {
int len = le32_to_cpu(con->v1.in_reply.authorizer_len);
/*
* Any connection that defines ->get_authorizer()
* should also define ->add_authorizer_challenge() and
* ->verify_authorizer_reply().
*
* See get_connect_authorizer().
*/
if (con->v1.in_reply.tag ==
CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
ret = con->ops->add_authorizer_challenge(
con, con->v1.auth->authorizer_reply_buf, len);
if (ret < 0)
return ret;
con_out_kvec_reset(con);
__prepare_write_connect(con);
prepare_read_connect(con);
return 0;
}
if (len) {
ret = con->ops->verify_authorizer_reply(con);
if (ret < 0) {
con->error_msg = "bad authorize reply";
return ret;
}
}
}
switch (con->v1.in_reply.tag) {
case CEPH_MSGR_TAG_FEATURES:
pr_err("%s%lld %s feature set mismatch,"
" my %llx < server's %llx, missing %llx\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr),
sup_feat, server_feat, server_feat & ~sup_feat);
con->error_msg = "missing required protocol features";
return -1;
case CEPH_MSGR_TAG_BADPROTOVER:
pr_err("%s%lld %s protocol version mismatch,"
" my %d != server's %d\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr),
le32_to_cpu(con->v1.out_connect.protocol_version),
le32_to_cpu(con->v1.in_reply.protocol_version));
con->error_msg = "protocol version mismatch";
return -1;
case CEPH_MSGR_TAG_BADAUTHORIZER:
con->v1.auth_retry++;
dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
con->v1.auth_retry);
if (con->v1.auth_retry == 2) {
con->error_msg = "connect authorization failure";
return -1;
}
con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
break;
case CEPH_MSGR_TAG_RESETSESSION:
/*
* If we connected with a large connect_seq but the peer
* has no record of a session with us (no connection, or
* connect_seq == 0), they will send RESETSESSION to indicate
* that they must have reset their session, and may have
* dropped messages.
*/
dout("process_connect got RESET peer seq %u\n",
le32_to_cpu(con->v1.in_reply.connect_seq));
pr_info("%s%lld %s session reset\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr));
ceph_con_reset_session(con);
con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
/* Tell ceph about it. */
mutex_unlock(&con->mutex);
if (con->ops->peer_reset)
con->ops->peer_reset(con);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_V1_CONNECT_MSG)
return -EAGAIN;
break;
case CEPH_MSGR_TAG_RETRY_SESSION:
/*
* If we sent a smaller connect_seq than the peer has, try
* again with a larger value.
*/
dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
le32_to_cpu(con->v1.out_connect.connect_seq),
le32_to_cpu(con->v1.in_reply.connect_seq));
con->v1.connect_seq = le32_to_cpu(con->v1.in_reply.connect_seq);
con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
break;
case CEPH_MSGR_TAG_RETRY_GLOBAL:
/*
* If we sent a smaller global_seq than the peer has, try
* again with a larger value.
*/
dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
con->v1.peer_global_seq,
le32_to_cpu(con->v1.in_reply.global_seq));
ceph_get_global_seq(con->msgr,
le32_to_cpu(con->v1.in_reply.global_seq));
con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
break;
case CEPH_MSGR_TAG_SEQ:
case CEPH_MSGR_TAG_READY:
if (req_feat & ~server_feat) {
pr_err("%s%lld %s protocol feature mismatch,"
" my required %llx > server's %llx, need %llx\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr),
req_feat, server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
return -1;
}
WARN_ON(con->state != CEPH_CON_S_V1_CONNECT_MSG);
con->state = CEPH_CON_S_OPEN;
con->v1.auth_retry = 0; /* we authenticated; clear flag */
con->v1.peer_global_seq =
le32_to_cpu(con->v1.in_reply.global_seq);
con->v1.connect_seq++;
con->peer_features = server_feat;
dout("process_connect got READY gseq %d cseq %d (%d)\n",
con->v1.peer_global_seq,
le32_to_cpu(con->v1.in_reply.connect_seq),
con->v1.connect_seq);
WARN_ON(con->v1.connect_seq !=
le32_to_cpu(con->v1.in_reply.connect_seq));
if (con->v1.in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
con->delay = 0; /* reset backoff memory */
if (con->v1.in_reply.tag == CEPH_MSGR_TAG_SEQ) {
prepare_write_seq(con);
prepare_read_seq(con);
} else {
prepare_read_tag(con);
}
break;
case CEPH_MSGR_TAG_WAIT:
/*
* If there is a connection race (we are opening
* connections to each other), one of us may just have
* to WAIT. This shouldn't happen if we are the
* client.
*/
con->error_msg = "protocol error, got WAIT as client";
return -1;
default:
con->error_msg = "protocol error, garbage tag during connect";
return -1;
}
return 0;
}
/*
* read (part of) an ack
*/
static int read_partial_ack(struct ceph_connection *con)
{
int size = sizeof(con->v1.in_temp_ack);
int end = size;
return read_partial(con, end, size, &con->v1.in_temp_ack);
}
/*
* We can finally discard anything that's been acked.
*/
static void process_ack(struct ceph_connection *con)
{
u64 ack = le64_to_cpu(con->v1.in_temp_ack);
if (con->v1.in_tag == CEPH_MSGR_TAG_ACK)
ceph_con_discard_sent(con, ack);
else
ceph_con_discard_requeued(con, ack);
prepare_read_tag(con);
}
static int read_partial_message_chunk(struct ceph_connection *con,
struct kvec *section,
unsigned int sec_len, u32 *crc)
{
int ret, left;
BUG_ON(!section);
while (section->iov_len < sec_len) {
BUG_ON(section->iov_base == NULL);
left = sec_len - section->iov_len;
ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
section->iov_len, left);
if (ret <= 0)
return ret;
section->iov_len += ret;
}
if (section->iov_len == sec_len)
*crc = crc32c(*crc, section->iov_base, section->iov_len);
return 1;
}
static inline int read_partial_message_section(struct ceph_connection *con,
struct kvec *section,
unsigned int sec_len, u32 *crc)
{
*crc = 0;
return read_partial_message_chunk(con, section, sec_len, crc);
}
static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE);
if (do_bounce && unlikely(!con->bounce_page)) {
con->bounce_page = alloc_page(GFP_NOIO);
if (!con->bounce_page) {
pr_err("failed to allocate bounce page\n");
return -ENOMEM;
}
}
while (cursor->sr_resid > 0) {
struct page *page, *rpage;
size_t off, len;
int ret;
page = ceph_msg_data_next(cursor, &off, &len);
rpage = do_bounce ? con->bounce_page : page;
/* clamp to what remains in extent */
len = min_t(int, len, cursor->sr_resid);
ret = ceph_tcp_recvpage(con->sock, rpage, (int)off, len);
if (ret <= 0)
return ret;
*crc = ceph_crc32c_page(*crc, rpage, off, ret);
ceph_msg_data_advance(cursor, (size_t)ret);
cursor->sr_resid -= ret;
if (do_bounce)
memcpy_page(page, off, rpage, off, ret);
}
return 1;
}
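/*
 * Receive the data payload of a sparse-read message: alternate between
 * draining the kvec or extent handed out by the ->sparse_read()
 * callback and asking the callback for the next one, accumulating the
 * data crc along the way.
 */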
static int read_sparse_msg_data(struct ceph_connection *con)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
u32 crc = 0;
int ret = 1;
if (do_datacrc)
crc = con->in_data_crc;
do {
if (con->v1.in_sr_kvec.iov_base)
ret = read_partial_message_chunk(con,
&con->v1.in_sr_kvec,
con->v1.in_sr_len,
&crc);
else if (cursor->sr_resid > 0)
ret = read_sparse_msg_extent(con, &crc);
if (ret <= 0) {
if (do_datacrc)
con->in_data_crc = crc;
return ret;
}
memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec));
ret = con->ops->sparse_read(con, cursor,
(char **)&con->v1.in_sr_kvec.iov_base);
con->v1.in_sr_len = ret;
} while (ret > 0);
if (do_datacrc)
con->in_data_crc = crc;
return ret < 0 ? ret : 1; /* must return > 0 to indicate success */
}
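/*
 * Receive (more of) the data payload directly into the message's data
 * item pages, optionally accumulating the data crc.
 */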
static int read_partial_msg_data(struct ceph_connection *con)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
struct page *page;
size_t page_offset;
size_t length;
u32 crc = 0;
int ret;
if (do_datacrc)
crc = con->in_data_crc;
while (cursor->total_resid) {
if (!cursor->resid) {
ceph_msg_data_advance(cursor, 0);
continue;
}
page = ceph_msg_data_next(cursor, &page_offset, &length);
ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
if (ret <= 0) {
if (do_datacrc)
con->in_data_crc = crc;
return ret;
}
if (do_datacrc)
crc = ceph_crc32c_page(crc, page, page_offset, ret);
ceph_msg_data_advance(cursor, (size_t)ret);
}
if (do_datacrc)
con->in_data_crc = crc;
return 1; /* must return > 0 to indicate success */
}
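/*
 * As read_partial_msg_data(), but receive into a private bounce page:
 * the crc is computed over the bounce page before the data is copied
 * into the destination page (RXBOUNCE option).
 */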
static int read_partial_msg_data_bounce(struct ceph_connection *con)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
struct page *page;
size_t off, len;
u32 crc;
int ret;
if (unlikely(!con->bounce_page)) {
con->bounce_page = alloc_page(GFP_NOIO);
if (!con->bounce_page) {
pr_err("failed to allocate bounce page\n");
return -ENOMEM;
}
}
crc = con->in_data_crc;
while (cursor->total_resid) {
if (!cursor->resid) {
ceph_msg_data_advance(cursor, 0);
continue;
}
page = ceph_msg_data_next(cursor, &off, &len);
ret = ceph_tcp_recvpage(con->sock, con->bounce_page, 0, len);
if (ret <= 0) {
con->in_data_crc = crc;
return ret;
}
crc = crc32c(crc, page_address(con->bounce_page), ret);
memcpy_to_page(page, off, page_address(con->bounce_page), ret);
ceph_msg_data_advance(cursor, ret);
}
con->in_data_crc = crc;
return 1; /* must return > 0 to indicate success */
}
/*
* read (part of) a message.
*/
static int read_partial_message(struct ceph_connection *con)
{
struct ceph_msg *m = con->in_msg;
int size;
int end;
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
u64 seq;
u32 crc;
dout("read_partial_message con %p msg %p\n", con, m);
/* header */
size = sizeof(con->v1.in_hdr);
end = size;
ret = read_partial(con, end, size, &con->v1.in_hdr);
if (ret <= 0)
return ret;
crc = crc32c(0, &con->v1.in_hdr, offsetof(struct ceph_msg_header, crc));
if (cpu_to_le32(crc) != con->v1.in_hdr.crc) {
pr_err("read_partial_message bad hdr crc %u != expected %u\n",
crc, con->v1.in_hdr.crc);
return -EBADMSG;
}
front_len = le32_to_cpu(con->v1.in_hdr.front_len);
if (front_len > CEPH_MSG_MAX_FRONT_LEN)
return -EIO;
middle_len = le32_to_cpu(con->v1.in_hdr.middle_len);
if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
return -EIO;
data_len = le32_to_cpu(con->v1.in_hdr.data_len);
if (data_len > CEPH_MSG_MAX_DATA_LEN)
return -EIO;
/* verify seq# */
seq = le64_to_cpu(con->v1.in_hdr.seq);
if ((s64)seq - (s64)con->in_seq < 1) {
pr_info("skipping %s%lld %s seq %lld expected %lld\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr),
seq, con->in_seq + 1);
con->v1.in_base_pos = -front_len - middle_len - data_len -
sizeof_footer(con);
con->v1.in_tag = CEPH_MSGR_TAG_READY;
return 1;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
con->error_msg = "bad message sequence # for incoming message";
return -EBADE;
}
/* allocate message? */
if (!con->in_msg) {
int skip = 0;
dout("got hdr type %d front %d data %d\n", con->v1.in_hdr.type,
front_len, data_len);
ret = ceph_con_in_msg_alloc(con, &con->v1.in_hdr, &skip);
if (ret < 0)
return ret;
BUG_ON((!con->in_msg) ^ skip);
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
con->v1.in_base_pos = -front_len - middle_len -
data_len - sizeof_footer(con);
con->v1.in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
return 1;
}
BUG_ON(!con->in_msg);
BUG_ON(con->in_msg->con != con);
m = con->in_msg;
m->front.iov_len = 0; /* haven't read it yet */
if (m->middle)
m->middle->vec.iov_len = 0;
/* prepare for data payload, if any */
if (data_len)
prepare_message_data(con->in_msg, data_len);
}
/* front */
ret = read_partial_message_section(con, &m->front, front_len,
&con->in_front_crc);
if (ret <= 0)
return ret;
/* middle */
if (m->middle) {
ret = read_partial_message_section(con, &m->middle->vec,
middle_len,
&con->in_middle_crc);
if (ret <= 0)
return ret;
}
/* (page) data */
if (data_len) {
if (!m->num_data_items)
return -EIO;
if (m->sparse_read)
ret = read_sparse_msg_data(con);
else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
ret = read_partial_msg_data_bounce(con);
else
ret = read_partial_msg_data(con);
if (ret <= 0)
return ret;
}
/* footer */
size = sizeof_footer(con);
end += size;
ret = read_partial(con, end, size, &m->footer);
if (ret <= 0)
return ret;
if (!need_sign) {
m->footer.flags = m->old_footer.flags;
m->footer.sig = 0;
}
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
/* crc ok? */
if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
pr_err("read_partial_message %p front crc %u != exp. %u\n",
m, con->in_front_crc, m->footer.front_crc);
return -EBADMSG;
}
if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
pr_err("read_partial_message %p middle crc %u != exp %u\n",
m, con->in_middle_crc, m->footer.middle_crc);
return -EBADMSG;
}
if (do_datacrc &&
(m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
con->in_data_crc, le32_to_cpu(m->footer.data_crc));
return -EBADMSG;
}
if (need_sign && con->ops->check_message_signature &&
con->ops->check_message_signature(m)) {
pr_err("read_partial_message %p signature check failed\n", m);
return -EBADMSG;
}
return 1; /* done! */
}
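/*
 * Read the timestamp carried by a KEEPALIVE2 ack and record it as the
 * time of the last keepalive ack from the peer.
 */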
static int read_keepalive_ack(struct ceph_connection *con)
{
struct ceph_timespec ceph_ts;
size_t size = sizeof(ceph_ts);
int ret = read_partial(con, size, size, &ceph_ts);
if (ret <= 0)
return ret;
ceph_decode_timespec64(&con->last_keepalive_ack, &ceph_ts);
prepare_read_tag(con);
return 1;
}
/*
* Read what we can from the socket.
*/
int ceph_con_v1_try_read(struct ceph_connection *con)
{
int ret = -1;
more:
dout("try_read start %p state %d\n", con, con->state);
if (con->state != CEPH_CON_S_V1_BANNER &&
con->state != CEPH_CON_S_V1_CONNECT_MSG &&
con->state != CEPH_CON_S_OPEN)
return 0;
BUG_ON(!con->sock);
dout("try_read tag %d in_base_pos %d\n", con->v1.in_tag,
con->v1.in_base_pos);
if (con->state == CEPH_CON_S_V1_BANNER) {
ret = read_partial_banner(con);
if (ret <= 0)
goto out;
ret = process_banner(con);
if (ret < 0)
goto out;
con->state = CEPH_CON_S_V1_CONNECT_MSG;
/*
* Received banner is good, exchange connection info.
* Do not reset out_kvec, as sending our banner raced
* with receiving peer banner after connect completed.
*/
ret = prepare_write_connect(con);
if (ret < 0)
goto out;
prepare_read_connect(con);
/* Send connection info before awaiting response */
goto out;
}
if (con->state == CEPH_CON_S_V1_CONNECT_MSG) {
ret = read_partial_connect(con);
if (ret <= 0)
goto out;
ret = process_connect(con);
if (ret < 0)
goto out;
goto more;
}
WARN_ON(con->state != CEPH_CON_S_OPEN);
if (con->v1.in_base_pos < 0) {
/*
* skipping + discarding content.
*/
ret = ceph_tcp_recvmsg(con->sock, NULL, -con->v1.in_base_pos);
if (ret <= 0)
goto out;
dout("skipped %d / %d bytes\n", ret, -con->v1.in_base_pos);
con->v1.in_base_pos += ret;
if (con->v1.in_base_pos)
goto more;
}
if (con->v1.in_tag == CEPH_MSGR_TAG_READY) {
/*
* what's next?
*/
ret = ceph_tcp_recvmsg(con->sock, &con->v1.in_tag, 1);
if (ret <= 0)
goto out;
dout("try_read got tag %d\n", con->v1.in_tag);
switch (con->v1.in_tag) {
case CEPH_MSGR_TAG_MSG:
prepare_read_message(con);
break;
case CEPH_MSGR_TAG_ACK:
prepare_read_ack(con);
break;
case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
prepare_read_keepalive_ack(con);
break;
case CEPH_MSGR_TAG_CLOSE:
ceph_con_close_socket(con);
con->state = CEPH_CON_S_CLOSED;
goto out;
default:
goto bad_tag;
}
}
if (con->v1.in_tag == CEPH_MSGR_TAG_MSG) {
ret = read_partial_message(con);
if (ret <= 0) {
switch (ret) {
case -EBADMSG:
con->error_msg = "bad crc/signature";
fallthrough;
case -EBADE:
ret = -EIO;
break;
case -EIO:
con->error_msg = "io error";
break;
}
goto out;
}
if (con->v1.in_tag == CEPH_MSGR_TAG_READY)
goto more;
ceph_con_process_message(con);
if (con->state == CEPH_CON_S_OPEN)
prepare_read_tag(con);
goto more;
}
if (con->v1.in_tag == CEPH_MSGR_TAG_ACK ||
con->v1.in_tag == CEPH_MSGR_TAG_SEQ) {
/*
* the final handshake seq exchange is semantically
* equivalent to an ACK
*/
ret = read_partial_ack(con);
if (ret <= 0)
goto out;
process_ack(con);
goto more;
}
if (con->v1.in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
ret = read_keepalive_ack(con);
if (ret <= 0)
goto out;
goto more;
}
out:
dout("try_read done on %p ret %d\n", con, ret);
return ret;
bad_tag:
pr_err("try_read bad tag %d\n", con->v1.in_tag);
con->error_msg = "protocol error, garbage tag";
ret = -1;
goto out;
}
/*
* Write something to the socket. Called in a worker thread when the
* socket appears to be writeable and we have something ready to send.
*/
int ceph_con_v1_try_write(struct ceph_connection *con)
{
int ret = 1;
dout("try_write start %p state %d\n", con, con->state);
if (con->state != CEPH_CON_S_PREOPEN &&
con->state != CEPH_CON_S_V1_BANNER &&
con->state != CEPH_CON_S_V1_CONNECT_MSG &&
con->state != CEPH_CON_S_OPEN)
return 0;
/* open the socket first? */
if (con->state == CEPH_CON_S_PREOPEN) {
BUG_ON(con->sock);
con->state = CEPH_CON_S_V1_BANNER;
con_out_kvec_reset(con);
prepare_write_banner(con);
prepare_read_banner(con);
BUG_ON(con->in_msg);
con->v1.in_tag = CEPH_MSGR_TAG_READY;
dout("try_write initiating connect on %p new state %d\n",
con, con->state);
ret = ceph_tcp_connect(con);
if (ret < 0) {
con->error_msg = "connect error";
goto out;
}
}
more:
dout("try_write out_kvec_bytes %d\n", con->v1.out_kvec_bytes);
BUG_ON(!con->sock);
/* kvec data queued? */
if (con->v1.out_kvec_left) {
ret = write_partial_kvec(con);
if (ret <= 0)
goto out;
}
if (con->v1.out_skip) {
ret = write_partial_skip(con);
if (ret <= 0)
goto out;
}
/* msg pages? */
if (con->out_msg) {
if (con->v1.out_msg_done) {
ceph_msg_put(con->out_msg);
con->out_msg = NULL; /* we're done with this one */
goto do_next;
}
ret = write_partial_message_data(con);
if (ret == 1)
goto more; /* we need to send the footer, too! */
if (ret == 0)
goto out;
if (ret < 0) {
dout("try_write write_partial_message_data err %d\n",
ret);
goto out;
}
}
do_next:
if (con->state == CEPH_CON_S_OPEN) {
if (ceph_con_flag_test_and_clear(con,
CEPH_CON_F_KEEPALIVE_PENDING)) {
prepare_write_keepalive(con);
goto more;
}
/* is anything else pending? */
if (!list_empty(&con->out_queue)) {
prepare_write_message(con);
goto more;
}
if (con->in_seq > con->in_seq_acked) {
prepare_write_ack(con);
goto more;
}
}
/* Nothing to do! */
ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
dout("try_write nothing else to write.\n");
ret = 0;
out:
dout("try_write done on %p ret %d\n", con, ret);
return ret;
}
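/*
 * Revoke the message currently being sent: drop its queued kvecs and
 * account whatever of it has not yet hit the wire in out_skip so that
 * write_partial_skip() can keep the outgoing byte stream consistent.
 */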
void ceph_con_v1_revoke(struct ceph_connection *con)
{
struct ceph_msg *msg = con->out_msg;
WARN_ON(con->v1.out_skip);
/* footer */
if (con->v1.out_msg_done) {
con->v1.out_skip += con_out_kvec_skip(con);
} else {
WARN_ON(!msg->data_length);
con->v1.out_skip += sizeof_footer(con);
}
/* data, middle, front */
if (msg->data_length)
con->v1.out_skip += msg->cursor.total_resid;
if (msg->middle)
con->v1.out_skip += con_out_kvec_skip(con);
con->v1.out_skip += con_out_kvec_skip(con);
dout("%s con %p out_kvec_bytes %d out_skip %d\n", __func__, con,
con->v1.out_kvec_bytes, con->v1.out_skip);
}
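/*
 * Revoke the message currently being received: arrange to skip the
 * remainder of it on the wire and go back to looking for a tag.
 */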
void ceph_con_v1_revoke_incoming(struct ceph_connection *con)
{
unsigned int front_len = le32_to_cpu(con->v1.in_hdr.front_len);
unsigned int middle_len = le32_to_cpu(con->v1.in_hdr.middle_len);
unsigned int data_len = le32_to_cpu(con->v1.in_hdr.data_len);
/* skip rest of message */
con->v1.in_base_pos = con->v1.in_base_pos -
sizeof(struct ceph_msg_header) -
front_len -
middle_len -
data_len -
sizeof(struct ceph_msg_footer);
con->v1.in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
dout("%s con %p in_base_pos %d\n", __func__, con, con->v1.in_base_pos);
}
bool ceph_con_v1_opened(struct ceph_connection *con)
{
return con->v1.connect_seq;
}
void ceph_con_v1_reset_session(struct ceph_connection *con)
{
con->v1.connect_seq = 0;
con->v1.peer_global_seq = 0;
}
void ceph_con_v1_reset_protocol(struct ceph_connection *con)
{
con->v1.out_skip = 0;
}
| linux-master | net/ceph/messenger_v1.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>
#include <trace/events/sock.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
* Ceph uses the messenger to exchange ceph_msg messages with other
* hosts in the system. The messenger provides ordered and reliable
* delivery. We tolerate TCP disconnects by reconnecting (with
* exponential backoff) in the case of a fault (disconnection, bad
* crc, protocol error). Acks allow sent messages to be discarded by
* the sender.
*/
/*
* We track the state of the socket on a given connection using
* values defined below. The transition to a new socket state is
* handled by a function which verifies we aren't coming from an
* unexpected state.
*
* --------
* | NEW* | transient initial state
* --------
* | con_sock_state_init()
* v
* ----------
* | CLOSED | initialized, but no socket (and no
* ---------- TCP connection)
* ^ \
* | \ con_sock_state_connecting()
* | ----------------------
* | \
* + con_sock_state_closed() \
* |+--------------------------- \
* | \ \ \
* | ----------- \ \
* | | CLOSING | socket event; \ \
* | ----------- await close \ \
* | ^ \ |
* | | \ |
* | + con_sock_state_closing() \ |
* | / \ | |
* | / --------------- | |
* | / \ v v
* | / --------------
* | / -----------------| CONNECTING | socket created, TCP
* | | / -------------- connect initiated
* | | | con_sock_state_connected()
* | | v
* -------------
* | CONNECTED | TCP connection established
* -------------
*
* State values for ceph_connection->sock_state; NEW is assumed to be 0.
*/
#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
static bool con_flag_valid(unsigned long con_flag)
{
switch (con_flag) {
case CEPH_CON_F_LOSSYTX:
case CEPH_CON_F_KEEPALIVE_PENDING:
case CEPH_CON_F_WRITE_PENDING:
case CEPH_CON_F_SOCK_CLOSED:
case CEPH_CON_F_BACKOFF:
return true;
default:
return false;
}
}
void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
clear_bit(con_flag, &con->flags);
}
void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
set_bit(con_flag, &con->flags);
}
bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_bit(con_flag, &con->flags);
}
bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_and_clear_bit(con_flag, &con->flags);
}
bool ceph_con_flag_test_and_set(struct ceph_connection *con,
unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_and_set_bit(con_flag, &con->flags);
}
/* Slab caches for frequently-allocated structures */
static struct kmem_cache *ceph_msg_cache;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);
/*
* Nicely render a sockaddr as a string. An array of formatted
* strings is used, to approximate reentrancy.
*/
#define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
#define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN 64 /* 54 is enough */
static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);
struct page *ceph_zero_page; /* used in certain error cases */
const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
{
int i;
char *s;
struct sockaddr_storage ss = addr->in_addr; /* align */
struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
s = addr_str[i];
switch (ss.ss_family) {
case AF_INET:
snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
le32_to_cpu(addr->type), &in4->sin_addr,
ntohs(in4->sin_port));
break;
case AF_INET6:
snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
le32_to_cpu(addr->type), &in6->sin6_addr,
ntohs(in6->sin6_port));
break;
default:
snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
ss.ss_family);
}
return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
void ceph_encode_my_addr(struct ceph_messenger *msgr)
{
if (!ceph_msgr2(from_msgr(msgr))) {
memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
sizeof(msgr->my_enc_addr));
ceph_encode_banner_addr(&msgr->my_enc_addr);
}
}
/*
* work queue for all reading and writing to/from the socket.
*/
static struct workqueue_struct *ceph_msgr_wq;
static int ceph_msgr_slab_init(void)
{
BUG_ON(ceph_msg_cache);
ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
if (!ceph_msg_cache)
return -ENOMEM;
return 0;
}
static void ceph_msgr_slab_exit(void)
{
BUG_ON(!ceph_msg_cache);
kmem_cache_destroy(ceph_msg_cache);
ceph_msg_cache = NULL;
}
static void _ceph_msgr_exit(void)
{
if (ceph_msgr_wq) {
destroy_workqueue(ceph_msgr_wq);
ceph_msgr_wq = NULL;
}
BUG_ON(!ceph_zero_page);
put_page(ceph_zero_page);
ceph_zero_page = NULL;
ceph_msgr_slab_exit();
}
int __init ceph_msgr_init(void)
{
if (ceph_msgr_slab_init())
return -ENOMEM;
BUG_ON(ceph_zero_page);
ceph_zero_page = ZERO_PAGE(0);
get_page(ceph_zero_page);
/*
* The number of active work items is limited by the number of
* connections, so leave @max_active at default.
*/
ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
if (ceph_msgr_wq)
return 0;
pr_err("msgr_init failed to create workqueue\n");
_ceph_msgr_exit();
return -ENOMEM;
}
void ceph_msgr_exit(void)
{
BUG_ON(ceph_msgr_wq == NULL);
_ceph_msgr_exit();
}
void ceph_msgr_flush(void)
{
flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
/* Connection socket state transition functions */
static void con_sock_state_init(struct ceph_connection *con)
{
int old_state;
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
printk("%s: unexpected old state %d\n", __func__, old_state);
dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
CON_SOCK_STATE_CLOSED);
}
static void con_sock_state_connecting(struct ceph_connection *con)
{
int old_state;
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
printk("%s: unexpected old state %d\n", __func__, old_state);
dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
CON_SOCK_STATE_CONNECTING);
}
static void con_sock_state_connected(struct ceph_connection *con)
{
int old_state;
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
printk("%s: unexpected old state %d\n", __func__, old_state);
dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
CON_SOCK_STATE_CONNECTED);
}
static void con_sock_state_closing(struct ceph_connection *con)
{
int old_state;
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
old_state != CON_SOCK_STATE_CONNECTED &&
old_state != CON_SOCK_STATE_CLOSING))
printk("%s: unexpected old state %d\n", __func__, old_state);
dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
CON_SOCK_STATE_CLOSING);
}
static void con_sock_state_closed(struct ceph_connection *con)
{
int old_state;
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
old_state != CON_SOCK_STATE_CLOSING &&
old_state != CON_SOCK_STATE_CONNECTING &&
old_state != CON_SOCK_STATE_CLOSED))
printk("%s: unexpected old state %d\n", __func__, old_state);
dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
CON_SOCK_STATE_CLOSED);
}
/*
* socket callback functions
*/
/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
trace_sk_data_ready(sk);
if (atomic_read(&con->msgr->stopping)) {
return;
}
if (sk->sk_state != TCP_CLOSE_WAIT) {
dout("%s %p state = %d, queueing work\n", __func__,
con, con->state);
queue_con(con);
}
}
/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
/* only queue to workqueue if there is data we want to write,
* and there is sufficient space in the socket buffer to accept
* more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
* doesn't get called again until try_write() fills the socket
* buffer. See net/ipv4/tcp_input.c:tcp_check_space()
* and net/core/stream.c:sk_stream_write_space().
*/
if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
if (sk_stream_is_writeable(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
}
} else {
dout("%s %p nothing to write\n", __func__, con);
}
}
/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
dout("%s %p state = %d sk_state = %u\n", __func__,
con, con->state, sk->sk_state);
switch (sk->sk_state) {
case TCP_CLOSE:
dout("%s TCP_CLOSE\n", __func__);
fallthrough;
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
queue_con(con);
break;
case TCP_ESTABLISHED:
dout("%s TCP_ESTABLISHED\n", __func__);
con_sock_state_connected(con);
queue_con(con);
break;
default: /* Everything else is uninteresting */
break;
}
}
/*
* set up socket callbacks
*/
static void set_sock_callbacks(struct socket *sock,
struct ceph_connection *con)
{
struct sock *sk = sock->sk;
sk->sk_user_data = con;
sk->sk_data_ready = ceph_sock_data_ready;
sk->sk_write_space = ceph_sock_write_space;
sk->sk_state_change = ceph_sock_state_change;
}
/*
* socket helpers
*/
/*
* initiate connection to a remote socket.
*/
int ceph_tcp_connect(struct ceph_connection *con)
{
struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
struct socket *sock;
unsigned int noio_flag;
int ret;
dout("%s con %p peer_addr %s\n", __func__, con,
ceph_pr_addr(&con->peer_addr));
BUG_ON(con->sock);
/* sock_create_kern() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
sock->sk->sk_use_task_frag = false;
#ifdef CONFIG_LOCKDEP
lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif
set_sock_callbacks(sock, con);
con_sock_state_connecting(con);
ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
ceph_pr_addr(&con->peer_addr),
sock->sk->sk_state);
} else if (ret < 0) {
pr_err("connect %s error %d\n",
ceph_pr_addr(&con->peer_addr), ret);
sock_release(sock);
return ret;
}
if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
tcp_sock_set_nodelay(sock->sk);
con->sock = sock;
return 0;
}
/*
* Shutdown/close the socket for the given connection.
*/
int ceph_con_close_socket(struct ceph_connection *con)
{
int rc = 0;
dout("%s con %p sock %p\n", __func__, con, con->sock);
if (con->sock) {
rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
sock_release(con->sock);
con->sock = NULL;
}
/*
* Forcibly clear the SOCK_CLOSED flag. It gets set
* independent of the connection mutex, and we could have
* received a socket close event before we had the chance to
* shut the socket down.
*/
ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
con_sock_state_closed(con);
return rc;
}
static void ceph_con_reset_protocol(struct ceph_connection *con)
{
dout("%s con %p\n", __func__, con);
ceph_con_close_socket(con);
if (con->in_msg) {
WARN_ON(con->in_msg->con != con);
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
}
if (con->out_msg) {
WARN_ON(con->out_msg->con != con);
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
}
if (con->bounce_page) {
__free_page(con->bounce_page);
con->bounce_page = NULL;
}
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_reset_protocol(con);
else
ceph_con_v1_reset_protocol(con);
}
/*
 * Reset a connection (see ceph_con_reset_session() below): discard all
 * incoming and outgoing messages and clear *_seq state.  The next two
 * helpers drop queued messages.
*/
static void ceph_msg_remove(struct ceph_msg *msg)
{
list_del_init(&msg->list_head);
ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
while (!list_empty(head)) {
struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
list_head);
ceph_msg_remove(msg);
}
}
void ceph_con_reset_session(struct ceph_connection *con)
{
dout("%s con %p\n", __func__, con);
WARN_ON(con->in_msg);
WARN_ON(con->out_msg);
ceph_msg_remove_list(&con->out_queue);
ceph_msg_remove_list(&con->out_sent);
con->out_seq = 0;
con->in_seq = 0;
con->in_seq_acked = 0;
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_reset_session(con);
else
ceph_con_v1_reset_session(con);
}
/*
* mark a peer down. drop any open connections.
*/
void ceph_con_close(struct ceph_connection *con)
{
mutex_lock(&con->mutex);
dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
con->state = CEPH_CON_S_CLOSED;
ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX); /* so we retry next
connect */
ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
ceph_con_reset_protocol(con);
ceph_con_reset_session(con);
cancel_con(con);
mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);
/*
* Reopen a closed connection, with a new peer address.
*/
void ceph_con_open(struct ceph_connection *con,
__u8 entity_type, __u64 entity_num,
struct ceph_entity_addr *addr)
{
mutex_lock(&con->mutex);
dout("con_open %p %s\n", con, ceph_pr_addr(addr));
WARN_ON(con->state != CEPH_CON_S_CLOSED);
con->state = CEPH_CON_S_PREOPEN;
con->peer_name.type = (__u8) entity_type;
con->peer_name.num = cpu_to_le64(entity_num);
memcpy(&con->peer_addr, addr, sizeof(*addr));
con->delay = 0; /* reset backoff memory */
mutex_unlock(&con->mutex);
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
/*
* return true if this connection ever successfully opened
*/
bool ceph_con_opened(struct ceph_connection *con)
{
if (ceph_msgr2(from_msgr(con->msgr)))
return ceph_con_v2_opened(con);
return ceph_con_v1_opened(con);
}
/*
* initialize a new connection.
*/
void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
struct ceph_messenger *msgr)
{
dout("con_init %p\n", con);
memset(con, 0, sizeof(*con));
con->private = private;
con->ops = ops;
con->msgr = msgr;
con_sock_state_init(con);
mutex_init(&con->mutex);
INIT_LIST_HEAD(&con->out_queue);
INIT_LIST_HEAD(&con->out_sent);
INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
con->state = CEPH_CON_S_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
/*
* We maintain a global counter to order connection attempts. Get
* a unique seq greater than @gt.
*/
u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
u32 ret;
spin_lock(&msgr->global_seq_lock);
if (msgr->global_seq < gt)
msgr->global_seq = gt;
ret = ++msgr->global_seq;
spin_unlock(&msgr->global_seq_lock);
return ret;
}
/*
* Discard messages that have been acked by the server.
*/
void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
{
struct ceph_msg *msg;
u64 seq;
dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
while (!list_empty(&con->out_sent)) {
msg = list_first_entry(&con->out_sent, struct ceph_msg,
list_head);
WARN_ON(msg->needs_out_seq);
seq = le64_to_cpu(msg->hdr.seq);
if (seq > ack_seq)
break;
dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
msg, seq);
ceph_msg_remove(msg);
}
}
/*
* Discard messages that have been requeued in con_fault(), up to
* reconnect_seq. This avoids gratuitously resending messages that
* the server had received and handled prior to reconnect.
*/
void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
{
struct ceph_msg *msg;
u64 seq;
dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
while (!list_empty(&con->out_queue)) {
msg = list_first_entry(&con->out_queue, struct ceph_msg,
list_head);
if (msg->needs_out_seq)
break;
seq = le64_to_cpu(msg->hdr.seq);
if (seq > reconnect_seq)
break;
dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
msg, seq);
ceph_msg_remove(msg);
}
}
#ifdef CONFIG_BLOCK
/*
* For a bio data item, a piece is whatever remains of the next
* entry in the current bio iovec, or the first entry in the next
* bio in the list.
*/
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
struct ceph_msg_data *data = cursor->data;
struct ceph_bio_iter *it = &cursor->bio_iter;
cursor->resid = min_t(size_t, length, data->bio_length);
*it = data->bio_pos;
if (cursor->resid < it->iter.bi_size)
it->iter.bi_size = cursor->resid;
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
}
static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset,
size_t *length)
{
struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
cursor->bio_iter.iter);
*page_offset = bv.bv_offset;
*length = bv.bv_len;
return bv.bv_page;
}
static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct ceph_bio_iter *it = &cursor->bio_iter;
struct page *page = bio_iter_page(it->bio, it->iter);
BUG_ON(bytes > cursor->resid);
BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
cursor->resid -= bytes;
bio_advance_iter(it->bio, &it->iter, bytes);
if (!cursor->resid)
return false; /* no more data */
if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
page == bio_iter_page(it->bio, it->iter)))
return false; /* more bytes to process in this segment */
if (!it->iter.bi_size) {
it->bio = it->bio->bi_next;
it->iter = it->bio->bi_iter;
if (cursor->resid < it->iter.bi_size)
it->iter.bi_size = cursor->resid;
}
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
return true;
}
#endif /* CONFIG_BLOCK */
static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
struct ceph_msg_data *data = cursor->data;
struct bio_vec *bvecs = data->bvec_pos.bvecs;
cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
cursor->bvec_iter = data->bvec_pos.iter;
cursor->bvec_iter.bi_size = cursor->resid;
BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
}
static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset,
size_t *length)
{
struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
cursor->bvec_iter);
*page_offset = bv.bv_offset;
*length = bv.bv_len;
return bv.bv_page;
}
static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
BUG_ON(bytes > cursor->resid);
BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
cursor->resid -= bytes;
bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
if (!cursor->resid)
return false; /* no more data */
if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
page == bvec_iter_page(bvecs, cursor->bvec_iter)))
return false; /* more bytes to process in this segment */
BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
return true;
}
/*
* For a page array, a piece comes from the first page in the array
* that has not already been fully consumed.
*/
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
struct ceph_msg_data *data = cursor->data;
int page_count;
BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
BUG_ON(!data->pages);
BUG_ON(!data->length);
cursor->resid = min(length, data->length);
page_count = calc_pages_for(data->alignment, (u64)data->length);
cursor->page_offset = data->alignment & ~PAGE_MASK;
cursor->page_index = 0;
BUG_ON(page_count > (int)USHRT_MAX);
cursor->page_count = (unsigned short)page_count;
BUG_ON(length > SIZE_MAX - cursor->page_offset);
}
static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset, size_t *length)
{
struct ceph_msg_data *data = cursor->data;
BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
BUG_ON(cursor->page_index >= cursor->page_count);
BUG_ON(cursor->page_offset >= PAGE_SIZE);
*page_offset = cursor->page_offset;
*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
return data->pages[cursor->page_index];
}
static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
/* Advance the cursor page offset */
cursor->resid -= bytes;
cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
if (!bytes || cursor->page_offset)
return false; /* more bytes to process in the current page */
if (!cursor->resid)
return false; /* no more data */
/* Move on to the next page; offset is already at 0 */
BUG_ON(cursor->page_index >= cursor->page_count);
cursor->page_index++;
return true;
}
/*
* For a pagelist, a piece is whatever remains to be consumed in the
* first page in the list, or the front of the next page.
*/
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
struct ceph_msg_data *data = cursor->data;
struct ceph_pagelist *pagelist;
struct page *page;
BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
pagelist = data->pagelist;
BUG_ON(!pagelist);
if (!length)
return; /* pagelist can be assigned but empty */
BUG_ON(list_empty(&pagelist->head));
page = list_first_entry(&pagelist->head, struct page, lru);
cursor->resid = min(length, pagelist->length);
cursor->page = page;
cursor->offset = 0;
}
static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset, size_t *length)
{
struct ceph_msg_data *data = cursor->data;
struct ceph_pagelist *pagelist;
BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
pagelist = data->pagelist;
BUG_ON(!pagelist);
BUG_ON(!cursor->page);
BUG_ON(cursor->offset + cursor->resid != pagelist->length);
/* offset of first page in pagelist is always 0 */
*page_offset = cursor->offset & ~PAGE_MASK;
*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
return cursor->page;
}
static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct ceph_msg_data *data = cursor->data;
struct ceph_pagelist *pagelist;
BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
pagelist = data->pagelist;
BUG_ON(!pagelist);
BUG_ON(cursor->offset + cursor->resid != pagelist->length);
BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
/* Advance the cursor offset */
cursor->resid -= bytes;
cursor->offset += bytes;
/* offset of first page in pagelist is always 0 */
if (!bytes || cursor->offset & ~PAGE_MASK)
return false; /* more bytes to process in the current page */
if (!cursor->resid)
return false; /* no more data */
/* Move on to the next page */
BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
cursor->page = list_next_entry(cursor->page, lru);
return true;
}
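/*
 * For an iov_iter data item, a piece is whatever iov_iter_get_pages2()
 * hands back next, at most one page at a time.
 */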
static void ceph_msg_data_iter_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
struct ceph_msg_data *data = cursor->data;
cursor->iov_iter = data->iter;
cursor->lastlen = 0;
iov_iter_truncate(&cursor->iov_iter, length);
cursor->resid = iov_iter_count(&cursor->iov_iter);
}
static struct page *ceph_msg_data_iter_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset, size_t *length)
{
struct page *page;
ssize_t len;
if (cursor->lastlen)
iov_iter_revert(&cursor->iov_iter, cursor->lastlen);
len = iov_iter_get_pages2(&cursor->iov_iter, &page, PAGE_SIZE,
1, page_offset);
BUG_ON(len < 0);
cursor->lastlen = len;
/*
* FIXME: The assumption is that the pages represented by the iov_iter
* are pinned, with the references held by the upper-level
* callers, or by virtue of being under writeback. Eventually,
* we'll get an iov_iter_get_pages2 variant that doesn't take
* page refs. Until then, just put the page ref.
*/
VM_BUG_ON_PAGE(!PageWriteback(page) && page_count(page) < 2, page);
put_page(page);
*length = min_t(size_t, len, cursor->resid);
return page;
}
static bool ceph_msg_data_iter_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
BUG_ON(bytes > cursor->resid);
cursor->resid -= bytes;
if (bytes < cursor->lastlen) {
cursor->lastlen -= bytes;
} else {
iov_iter_advance(&cursor->iov_iter, bytes - cursor->lastlen);
cursor->lastlen = 0;
}
return cursor->resid;
}
/*
* Message data is handled (sent or received) in pieces, where each
* piece resides on a single page. The network layer might not
* consume an entire piece at once. A data item's cursor keeps
* track of which piece is next to process and how much remains to
* be processed in that piece. It also tracks whether the current
* piece is the last one in the data item.
*/
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
size_t length = cursor->total_resid;
switch (cursor->data->type) {
case CEPH_MSG_DATA_PAGELIST:
ceph_msg_data_pagelist_cursor_init(cursor, length);
break;
case CEPH_MSG_DATA_PAGES:
ceph_msg_data_pages_cursor_init(cursor, length);
break;
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
ceph_msg_data_bio_cursor_init(cursor, length);
break;
#endif /* CONFIG_BLOCK */
case CEPH_MSG_DATA_BVECS:
ceph_msg_data_bvecs_cursor_init(cursor, length);
break;
case CEPH_MSG_DATA_ITER:
ceph_msg_data_iter_cursor_init(cursor, length);
break;
case CEPH_MSG_DATA_NONE:
default:
/* BUG(); */
break;
}
cursor->need_crc = true;
}
void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
struct ceph_msg *msg, size_t length)
{
BUG_ON(!length);
BUG_ON(length > msg->data_length);
BUG_ON(!msg->num_data_items);
cursor->total_resid = length;
cursor->data = msg->data;
cursor->sr_resid = 0;
__ceph_msg_data_cursor_init(cursor);
}
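/*
 * Illustrative sketch of how a cursor is consumed (a simplified mirror
 * of the read_partial_msg_data() receive loop; consume() is a stand-in
 * for the actual send or receive step, which may handle fewer than
 * @len bytes per call):
 *
 *	struct ceph_msg_data_cursor *cursor = &msg->cursor;
 *
 *	ceph_msg_data_cursor_init(cursor, msg, data_len);
 *	while (cursor->total_resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len);
 *		ret = consume(page, off, len);
 *		ceph_msg_data_advance(cursor, ret);
 *	}
 *
 * (The real loops also handle an empty data item by calling
 * ceph_msg_data_advance(cursor, 0) to skip to the next one.)
 */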
/*
* Return the page containing the next piece to process for a given
* data item, and supply the page offset and length of that piece.
*/
struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
size_t *page_offset, size_t *length)
{
struct page *page;
switch (cursor->data->type) {
case CEPH_MSG_DATA_PAGELIST:
page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
break;
case CEPH_MSG_DATA_PAGES:
page = ceph_msg_data_pages_next(cursor, page_offset, length);
break;
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
page = ceph_msg_data_bio_next(cursor, page_offset, length);
break;
#endif /* CONFIG_BLOCK */
case CEPH_MSG_DATA_BVECS:
page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
break;
case CEPH_MSG_DATA_ITER:
page = ceph_msg_data_iter_next(cursor, page_offset, length);
break;
case CEPH_MSG_DATA_NONE:
default:
page = NULL;
break;
}
BUG_ON(!page);
BUG_ON(*page_offset + *length > PAGE_SIZE);
BUG_ON(!*length);
BUG_ON(*length > cursor->resid);
return page;
}
/*
 * Advance the cursor by @bytes: consume that much of the current piece
 * and, when a piece or data item is exhausted, move on to the next
 * one, noting whether a fresh crc is needed for the new piece.
*/
void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
{
bool new_piece;
BUG_ON(bytes > cursor->resid);
switch (cursor->data->type) {
case CEPH_MSG_DATA_PAGELIST:
new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
break;
case CEPH_MSG_DATA_PAGES:
new_piece = ceph_msg_data_pages_advance(cursor, bytes);
break;
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
new_piece = ceph_msg_data_bio_advance(cursor, bytes);
break;
#endif /* CONFIG_BLOCK */
case CEPH_MSG_DATA_BVECS:
new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
break;
case CEPH_MSG_DATA_ITER:
new_piece = ceph_msg_data_iter_advance(cursor, bytes);
break;
case CEPH_MSG_DATA_NONE:
default:
BUG();
break;
}
cursor->total_resid -= bytes;
if (!cursor->resid && cursor->total_resid) {
cursor->data++;
__ceph_msg_data_cursor_init(cursor);
new_piece = true;
}
cursor->need_crc = new_piece;
}
u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
unsigned int length)
{
char *kaddr;
kaddr = kmap(page);
BUG_ON(kaddr == NULL);
crc = crc32c(crc, kaddr + page_offset, length);
kunmap(page);
return crc;
}
bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
{
struct sockaddr_storage ss = addr->in_addr; /* align */
struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
switch (ss.ss_family) {
case AF_INET:
return addr4->s_addr == htonl(INADDR_ANY);
case AF_INET6:
return ipv6_addr_any(addr6);
default:
return true;
}
}
EXPORT_SYMBOL(ceph_addr_is_blank);
int ceph_addr_port(const struct ceph_entity_addr *addr)
{
switch (get_unaligned(&addr->in_addr.ss_family)) {
case AF_INET:
return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
case AF_INET6:
return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
}
return 0;
}
void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
{
switch (get_unaligned(&addr->in_addr.ss_family)) {
case AF_INET:
put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
break;
case AF_INET6:
put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
break;
}
}
/*
* Unlike other *_pton function semantics, zero indicates success.
*/
static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
char delim, const char **ipend)
{
memset(&addr->in_addr, 0, sizeof(addr->in_addr));
if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
put_unaligned(AF_INET, &addr->in_addr.ss_family);
return 0;
}
if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
put_unaligned(AF_INET6, &addr->in_addr.ss_family);
return 0;
}
return -EINVAL;
}
/*
* Extract hostname string and resolve using kernel DNS facility.
*/
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
struct ceph_entity_addr *addr, char delim, const char **ipend)
{
const char *end, *delim_p;
char *colon_p, *ip_addr = NULL;
int ip_len, ret;
/*
* The end of the hostname occurs immediately preceding the delimiter or
* the port marker (':') where the delimiter takes precedence.
*/
delim_p = memchr(name, delim, namelen);
colon_p = memchr(name, ':', namelen);
if (delim_p && colon_p)
end = delim_p < colon_p ? delim_p : colon_p;
else if (!delim_p && colon_p)
end = colon_p;
else {
end = delim_p;
if (!end) /* case: hostname:/ */
end = name + namelen;
}
if (end <= name)
return -EINVAL;
/* do dns_resolve upcall */
ip_len = dns_query(current->nsproxy->net_ns,
NULL, name, end - name, NULL, &ip_addr, NULL, false);
if (ip_len > 0)
ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
else
ret = -ESRCH;
kfree(ip_addr);
*ipend = end;
pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
ret, ret ? "failed" : ceph_pr_addr(addr));
return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
struct ceph_entity_addr *addr, char delim, const char **ipend)
{
return -EINVAL;
}
#endif
/*
* Parse a server name (IP or hostname). If a valid IP address is not found
* then try to extract a hostname to resolve using userspace DNS upcall.
*/
static int ceph_parse_server_name(const char *name, size_t namelen,
struct ceph_entity_addr *addr, char delim, const char **ipend)
{
int ret;
ret = ceph_pton(name, namelen, addr, delim, ipend);
if (ret)
ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
return ret;
}
/*
* Parse an ip[:port] list into an addr array. Use the default
* monitor port if a port isn't specified.
*/
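/*
 * For example (addresses purely illustrative), parsing
 * "192.168.0.1:6789,[2001:db8::1]" with delim ',' yields two entries,
 * the second one getting the default monitor port.
 */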
int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
int max_count, int *count, char delim)
{
int i, ret = -EINVAL;
const char *p = c;
dout("parse_ips on '%.*s'\n", (int)(end-c), c);
for (i = 0; i < max_count; i++) {
char cur_delim = delim;
const char *ipend;
int port;
if (*p == '[') {
cur_delim = ']';
p++;
}
ret = ceph_parse_server_name(p, end - p, &addr[i], cur_delim,
&ipend);
if (ret)
goto bad;
ret = -EINVAL;
p = ipend;
if (cur_delim == ']') {
if (*p != ']') {
dout("missing matching ']'\n");
goto bad;
}
p++;
}
/* port? */
if (p < end && *p == ':') {
port = 0;
p++;
while (p < end && *p >= '0' && *p <= '9') {
port = (port * 10) + (*p - '0');
p++;
}
if (port == 0)
port = CEPH_MON_PORT;
else if (port > 65535)
goto bad;
} else {
port = CEPH_MON_PORT;
}
ceph_addr_set_port(&addr[i], port);
/*
* We want the type to be set according to ms_mode
* option, but options are normally parsed after mon
* addresses. Rather than complicating parsing, set
* to LEGACY and override in build_initial_monmap()
* for mon addresses and ceph_messenger_init() for
* ip option.
*/
addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
addr[i].nonce = 0;
dout("%s got %s\n", __func__, ceph_pr_addr(&addr[i]));
if (p == end)
break;
if (*p != delim)
goto bad;
p++;
}
if (p != end)
goto bad;
if (count)
*count = i + 1;
return 0;
bad:
return ret;
}
/*
* Process message. This happens in the worker thread. The callback should
* be careful not to do anything that waits on other incoming messages or it
* may deadlock.
*/
void ceph_con_process_message(struct ceph_connection *con)
{
struct ceph_msg *msg = con->in_msg;
BUG_ON(con->in_msg->con != con);
con->in_msg = NULL;
/* if first message, set peer_name */
if (con->peer_name.type == 0)
con->peer_name = msg->hdr.src;
con->in_seq++;
mutex_unlock(&con->mutex);
dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
msg, le64_to_cpu(msg->hdr.seq),
ENTITY_NAME(msg->hdr.src),
le16_to_cpu(msg->hdr.type),
ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.middle_len),
le32_to_cpu(msg->hdr.data_len),
con->in_front_crc, con->in_middle_crc, con->in_data_crc);
con->ops->dispatch(con, msg);
mutex_lock(&con->mutex);
}
/*
* Atomically queue work on a connection after the specified delay.
* Bump @con reference to avoid races with connection teardown.
* Returns 0 if work was queued, or an error code otherwise.
*/
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
if (!con->ops->get(con)) {
dout("%s %p ref count 0\n", __func__, con);
return -ENOENT;
}
if (delay >= HZ)
delay = round_jiffies_relative(delay);
dout("%s %p %lu\n", __func__, con, delay);
if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
dout("%s %p - already queued\n", __func__, con);
con->ops->put(con);
return -EBUSY;
}
return 0;
}
static void queue_con(struct ceph_connection *con)
{
(void) queue_con_delay(con, 0);
}
static void cancel_con(struct ceph_connection *con)
{
if (cancel_delayed_work(&con->work)) {
dout("%s %p\n", __func__, con);
con->ops->put(con);
}
}
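/*
 * Consume a pending SOCK_CLOSED event, recording a state-specific
 * error message.  Returns true if the socket was closed out from
 * under us.
 */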
static bool con_sock_closed(struct ceph_connection *con)
{
if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
return false;
#define CASE(x) \
case CEPH_CON_S_ ## x: \
con->error_msg = "socket closed (con state " #x ")"; \
break;
switch (con->state) {
CASE(CLOSED);
CASE(PREOPEN);
CASE(V1_BANNER);
CASE(V1_CONNECT_MSG);
CASE(V2_BANNER_PREFIX);
CASE(V2_BANNER_PAYLOAD);
CASE(V2_HELLO);
CASE(V2_AUTH);
CASE(V2_AUTH_SIGNATURE);
CASE(V2_SESSION_CONNECT);
CASE(V2_SESSION_RECONNECT);
CASE(OPEN);
CASE(STANDBY);
default:
BUG();
}
#undef CASE
return true;
}
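/*
 * If a backoff was requested, queue delayed work for con->delay
 * jiffies.  Returns true if we are (still) backing off.
 */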
static bool con_backoff(struct ceph_connection *con)
{
int ret;
if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
return false;
ret = queue_con_delay(con, con->delay);
if (ret) {
dout("%s: con %p FAILED to back off %lu\n", __func__,
con, con->delay);
BUG_ON(ret == -ENOENT);
ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
}
return true;
}
/* Finish fault handling; con->mutex must *not* be held here */
static void con_fault_finish(struct ceph_connection *con)
{
dout("%s %p\n", __func__, con);
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
*/
if (con->v1.auth_retry) {
dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
if (con->ops->invalidate_authorizer)
con->ops->invalidate_authorizer(con);
con->v1.auth_retry = 0;
}
if (con->ops->fault)
con->ops->fault(con);
}
/*
* Do some work on a connection. Drop a connection ref when we're done.
*/
static void ceph_con_workfn(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
bool fault;
mutex_lock(&con->mutex);
while (true) {
int ret;
if ((fault = con_sock_closed(con))) {
dout("%s: con %p SOCK_CLOSED\n", __func__, con);
break;
}
if (con_backoff(con)) {
dout("%s: con %p BACKOFF\n", __func__, con);
break;
}
if (con->state == CEPH_CON_S_STANDBY) {
dout("%s: con %p STANDBY\n", __func__, con);
break;
}
if (con->state == CEPH_CON_S_CLOSED) {
dout("%s: con %p CLOSED\n", __func__, con);
BUG_ON(con->sock);
break;
}
if (con->state == CEPH_CON_S_PREOPEN) {
dout("%s: con %p PREOPEN\n", __func__, con);
BUG_ON(con->sock);
}
if (ceph_msgr2(from_msgr(con->msgr)))
ret = ceph_con_v2_try_read(con);
else
ret = ceph_con_v1_try_read(con);
if (ret < 0) {
if (ret == -EAGAIN)
continue;
if (!con->error_msg)
con->error_msg = "socket error on read";
fault = true;
break;
}
if (ceph_msgr2(from_msgr(con->msgr)))
ret = ceph_con_v2_try_write(con);
else
ret = ceph_con_v1_try_write(con);
if (ret < 0) {
if (ret == -EAGAIN)
continue;
if (!con->error_msg)
con->error_msg = "socket error on write";
fault = true;
}
break; /* If we make it to here, we're done */
}
if (fault)
con_fault(con);
mutex_unlock(&con->mutex);
if (fault)
con_fault_finish(con);
con->ops->put(con);
}
/*
* Generic error/fault handler. A retry mechanism is used with
 * exponential backoff.
*/
static void con_fault(struct ceph_connection *con)
{
dout("fault %p state %d to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr));
pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr), con->error_msg);
con->error_msg = NULL;
WARN_ON(con->state == CEPH_CON_S_STANDBY ||
con->state == CEPH_CON_S_CLOSED);
ceph_con_reset_protocol(con);
if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
dout("fault on LOSSYTX channel, marking CLOSED\n");
con->state = CEPH_CON_S_CLOSED;
return;
}
/* Requeue anything that hasn't been acked */
list_splice_init(&con->out_sent, &con->out_queue);
/* If there are no messages queued or keepalive pending, place
* the connection in a STANDBY state */
if (list_empty(&con->out_queue) &&
!ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
con->state = CEPH_CON_S_STANDBY;
} else {
/* retry after a delay. */
con->state = CEPH_CON_S_PREOPEN;
if (!con->delay) {
con->delay = BASE_DELAY_INTERVAL;
} else if (con->delay < MAX_DELAY_INTERVAL) {
con->delay *= 2;
if (con->delay > MAX_DELAY_INTERVAL)
con->delay = MAX_DELAY_INTERVAL;
}
ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
queue_con(con);
}
}
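/*
 * Pick a new nonce for our address (the nonce is part of the entity
 * address) and re-encode the banner address accordingly.
 */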
void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
{
u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
msgr->inst.addr.nonce = cpu_to_le32(nonce);
ceph_encode_my_addr(msgr);
}
/*
* initialize a new messenger instance
*/
void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr)
{
spin_lock_init(&msgr->global_seq_lock);
if (myaddr) {
memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
sizeof(msgr->inst.addr.in_addr));
ceph_addr_set_port(&msgr->inst.addr, 0);
}
/*
* Since nautilus, clients are identified using type ANY.
* For msgr1, ceph_encode_banner_addr() munges it to NONE.
*/
msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
/* generate a random non-zero nonce */
do {
get_random_bytes(&msgr->inst.addr.nonce,
sizeof(msgr->inst.addr.nonce));
} while (!msgr->inst.addr.nonce);
ceph_encode_my_addr(msgr);
atomic_set(&msgr->stopping, 0);
write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
dout("%s %p\n", __func__, msgr);
}
void ceph_messenger_fini(struct ceph_messenger *msgr)
{
put_net(read_pnet(&msgr->net));
}
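/*
 * Point @msg at @con (or at no connection if @con is NULL), taking and
 * dropping connection references as appropriate.
 */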
static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
{
if (msg->con)
msg->con->ops->put(msg->con);
msg->con = con ? con->ops->get(con) : NULL;
BUG_ON(msg->con != con);
}
static void clear_standby(struct ceph_connection *con)
{
/* come back from STANDBY? */
if (con->state == CEPH_CON_S_STANDBY) {
dout("clear_standby %p and ++connect_seq\n", con);
con->state = CEPH_CON_S_PREOPEN;
con->v1.connect_seq++;
WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
}
}
/*
* Queue up an outgoing message on the given connection.
*
* Consumes a ref on @msg.
*/
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
/* set src+dst */
msg->hdr.src = con->msgr->inst.name;
BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
msg->needs_out_seq = true;
mutex_lock(&con->mutex);
if (con->state == CEPH_CON_S_CLOSED) {
dout("con_send %p closed, dropping %p\n", con, msg);
ceph_msg_put(msg);
mutex_unlock(&con->mutex);
return;
}
msg_con_set(msg, con);
BUG_ON(!list_empty(&msg->list_head));
list_add_tail(&msg->list_head, &con->out_queue);
dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.middle_len),
le32_to_cpu(msg->hdr.data_len));
clear_standby(con);
mutex_unlock(&con->mutex);
/* if there wasn't anything waiting to send before, queue
* new work */
if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
/*
* Revoke a message that was previously queued for send
*/
void ceph_msg_revoke(struct ceph_msg *msg)
{
struct ceph_connection *con = msg->con;
if (!con) {
dout("%s msg %p null con\n", __func__, msg);
return; /* Message not in our possession */
}
mutex_lock(&con->mutex);
if (list_empty(&msg->list_head)) {
WARN_ON(con->out_msg == msg);
dout("%s con %p msg %p not linked\n", __func__, con, msg);
mutex_unlock(&con->mutex);
return;
}
dout("%s con %p msg %p was linked\n", __func__, con, msg);
msg->hdr.seq = 0;
ceph_msg_remove(msg);
if (con->out_msg == msg) {
WARN_ON(con->state != CEPH_CON_S_OPEN);
dout("%s con %p msg %p was sending\n", __func__, con, msg);
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_revoke(con);
else
ceph_con_v1_revoke(con);
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
} else {
dout("%s con %p msg %p not current, out_msg %p\n", __func__,
con, msg, con->out_msg);
}
mutex_unlock(&con->mutex);
}
/*
* Revoke a message that we may be reading data into
*/
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
struct ceph_connection *con = msg->con;
if (!con) {
dout("%s msg %p null con\n", __func__, msg);
return; /* Message not in our possession */
}
mutex_lock(&con->mutex);
if (con->in_msg == msg) {
WARN_ON(con->state != CEPH_CON_S_OPEN);
dout("%s con %p msg %p was recving\n", __func__, con, msg);
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_revoke_incoming(con);
else
ceph_con_v1_revoke_incoming(con);
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
} else {
dout("%s con %p msg %p not current, in_msg %p\n", __func__,
con, msg, con->in_msg);
}
mutex_unlock(&con->mutex);
}
/*
* Queue a keepalive byte to ensure the tcp connection is alive.
*/
void ceph_con_keepalive(struct ceph_connection *con)
{
dout("con_keepalive %p\n", con);
mutex_lock(&con->mutex);
clear_standby(con);
ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
mutex_unlock(&con->mutex);
if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval)
{
if (interval > 0 &&
(con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
struct timespec64 now;
struct timespec64 ts;
ktime_get_real_ts64(&now);
jiffies_to_timespec64(interval, &ts);
ts = timespec64_add(con->last_keepalive_ack, ts);
return timespec64_compare(&now, &ts) >= 0;
}
return false;
}
static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
{
BUG_ON(msg->num_data_items >= msg->max_data_items);
return &msg->data[msg->num_data_items++];
}
static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
int num_pages = calc_pages_for(data->alignment, data->length);
ceph_release_page_vector(data->pages, num_pages);
} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
ceph_pagelist_release(data->pagelist);
}
}
void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
size_t length, size_t alignment, bool own_pages)
{
struct ceph_msg_data *data;
BUG_ON(!pages);
BUG_ON(!length);
data = ceph_msg_data_add(msg);
data->type = CEPH_MSG_DATA_PAGES;
data->pages = pages;
data->length = length;
data->alignment = alignment & ~PAGE_MASK;
data->own_pages = own_pages;
msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pages);
void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist)
{
struct ceph_msg_data *data;
BUG_ON(!pagelist);
BUG_ON(!pagelist->length);
data = ceph_msg_data_add(msg);
data->type = CEPH_MSG_DATA_PAGELIST;
refcount_inc(&pagelist->refcnt);
data->pagelist = pagelist;
msg->data_length += pagelist->length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
#ifdef CONFIG_BLOCK
void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
u32 length)
{
struct ceph_msg_data *data;
data = ceph_msg_data_add(msg);
data->type = CEPH_MSG_DATA_BIO;
data->bio_pos = *bio_pos;
data->bio_length = length;
msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_bio);
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos)
{
struct ceph_msg_data *data;
data = ceph_msg_data_add(msg);
data->type = CEPH_MSG_DATA_BVECS;
data->bvec_pos = *bvec_pos;
msg->data_length += bvec_pos->iter.bi_size;
}
EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
void ceph_msg_data_add_iter(struct ceph_msg *msg,
struct iov_iter *iter)
{
struct ceph_msg_data *data;
data = ceph_msg_data_add(msg);
data->type = CEPH_MSG_DATA_ITER;
data->iter = *iter;
msg->data_length += iov_iter_count(&data->iter);
}
/*
* construct a new message with given type, size
* the new msg has a ref count of 1.
*/
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail)
{
struct ceph_msg *m;
m = kmem_cache_zalloc(ceph_msg_cache, flags);
if (m == NULL)
goto out;
m->hdr.type = cpu_to_le16(type);
m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
m->hdr.front_len = cpu_to_le32(front_len);
INIT_LIST_HEAD(&m->list_head);
kref_init(&m->kref);
/* front */
if (front_len) {
m->front.iov_base = kvmalloc(front_len, flags);
if (m->front.iov_base == NULL) {
dout("ceph_msg_new can't allocate %d bytes\n",
front_len);
goto out2;
}
} else {
m->front.iov_base = NULL;
}
m->front_alloc_len = m->front.iov_len = front_len;
if (max_data_items) {
m->data = kmalloc_array(max_data_items, sizeof(*m->data),
flags);
if (!m->data)
goto out2;
m->max_data_items = max_data_items;
}
dout("ceph_msg_new %p front %d\n", m, front_len);
return m;
out2:
ceph_msg_put(m);
out:
if (!can_fail) {
pr_err("msg_new can't create type %d front %d\n", type,
front_len);
WARN_ON(1);
} else {
dout("msg_new can't create type %d front %d\n", type,
front_len);
}
return NULL;
}
EXPORT_SYMBOL(ceph_msg_new2);
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail)
{
return ceph_msg_new2(type, front_len, 0, flags, can_fail);
}
EXPORT_SYMBOL(ceph_msg_new);
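/*
* Illustrative sketch, not taken from a real caller: when a message will
* carry data payloads, max_data_items reserves the data item slots up
* front and the payload is attached afterwards.  "pages", "front_len" and
* "length" are assumptions made for the example.
*
*	msg = ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, 1, GFP_NOFS, false);
*	if (!msg)
*		return -ENOMEM;
*	ceph_msg_data_add_pages(msg, pages, length, 0, false);
*
* Each ceph_msg_data_add_*() call consumes one of the reserved slots
* (see ceph_msg_data_add() above).
*/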
/*
* Allocate "middle" portion of a message, if it is needed and wasn't
* allocated by alloc_msg. This allows us to read a small fixed-size
* per-type header in the front and then gracefully fail (i.e.,
* propagate the error to the caller based on info in the front) when
* the middle is too large.
*/
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
int type = le16_to_cpu(msg->hdr.type);
int middle_len = le32_to_cpu(msg->hdr.middle_len);
dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
ceph_msg_type_name(type), middle_len);
BUG_ON(!middle_len);
BUG_ON(msg->middle);
msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
if (!msg->middle)
return -ENOMEM;
return 0;
}
/*
* Allocate a message for receiving an incoming message on a
* connection, and save the result in con->in_msg. Uses the
* connection's private alloc_msg op, which the connection must provide.
*
* Returns 0 on success, or a negative error code.
*
* On success, if we set *skip = 1:
* - the next message should be skipped and ignored.
* - con->in_msg == NULL
* or if we set *skip = 0:
* - con->in_msg is non-null.
* On error (ENOMEM, EAGAIN, ...),
* - con->in_msg == NULL
*/
int ceph_con_in_msg_alloc(struct ceph_connection *con,
struct ceph_msg_header *hdr, int *skip)
{
int middle_len = le32_to_cpu(hdr->middle_len);
struct ceph_msg *msg;
int ret = 0;
BUG_ON(con->in_msg != NULL);
BUG_ON(!con->ops->alloc_msg);
mutex_unlock(&con->mutex);
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
if (con->state != CEPH_CON_S_OPEN) {
if (msg)
ceph_msg_put(msg);
return -EAGAIN;
}
if (msg) {
BUG_ON(*skip);
msg_con_set(msg, con);
con->in_msg = msg;
} else {
/*
* Null message pointer means either we should skip
* this message or we couldn't allocate memory. The
* former is not an error.
*/
if (*skip)
return 0;
con->error_msg = "error allocating memory for incoming message";
return -ENOMEM;
}
memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
if (middle_len && !con->in_msg->middle) {
ret = ceph_alloc_middle(con, con->in_msg);
if (ret < 0) {
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
}
}
return ret;
}
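/*
* Minimal sketch of an ->alloc_msg callback honouring the contract
* described above.  This is illustrative only: example_alloc_msg() and
* interested_in() are made-up names, not an existing implementation.
*
*	static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
*						  struct ceph_msg_header *hdr,
*						  int *skip)
*	{
*		int front_len = le32_to_cpu(hdr->front_len);
*
*		if (!interested_in(hdr)) {
*			*skip = 1;	(message will be consumed and dropped)
*			return NULL;
*		}
*		*skip = 0;
*		return ceph_msg_new(le16_to_cpu(hdr->type), front_len,
*				    GFP_NOFS, false);	(NULL here means ENOMEM)
*	}
*/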
void ceph_con_get_out_msg(struct ceph_connection *con)
{
struct ceph_msg *msg;
BUG_ON(list_empty(&con->out_queue));
msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
WARN_ON(msg->con != con);
/*
* Move the message to the "sent" list, keeping the ref taken in
* ceph_con_send().  That ref is dropped when the message is acked or revoked.
*/
list_move_tail(&msg->list_head, &con->out_sent);
/*
* Only assign outgoing seq # if we haven't sent this message
* yet. If it is requeued, resend with its original seq.
*/
if (msg->needs_out_seq) {
msg->hdr.seq = cpu_to_le64(++con->out_seq);
msg->needs_out_seq = false;
if (con->ops->reencode_message)
con->ops->reencode_message(msg);
}
/*
* Get a ref for out_msg. It is put when we are done sending the
* message or in case of a fault.
*/
WARN_ON(con->out_msg);
con->out_msg = ceph_msg_get(msg);
}
/*
* Free a generically kmalloc'd message.
*/
static void ceph_msg_free(struct ceph_msg *m)
{
dout("%s %p\n", __func__, m);
kvfree(m->front.iov_base);
kfree(m->data);
kmem_cache_free(ceph_msg_cache, m);
}
static void ceph_msg_release(struct kref *kref)
{
struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
int i;
dout("%s %p\n", __func__, m);
WARN_ON(!list_empty(&m->list_head));
msg_con_set(m, NULL);
/* drop middle, data, if any */
if (m->middle) {
ceph_buffer_put(m->middle);
m->middle = NULL;
}
for (i = 0; i < m->num_data_items; i++)
ceph_msg_data_destroy(&m->data[i]);
if (m->pool)
ceph_msgpool_put(m->pool, m);
else
ceph_msg_free(m);
}
struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
{
dout("%s %p (was %d)\n", __func__, msg,
kref_read(&msg->kref));
kref_get(&msg->kref);
return msg;
}
EXPORT_SYMBOL(ceph_msg_get);
void ceph_msg_put(struct ceph_msg *msg)
{
dout("%s %p (was %d)\n", __func__, msg,
kref_read(&msg->kref));
kref_put(&msg->kref, ceph_msg_release);
}
EXPORT_SYMBOL(ceph_msg_put);
void ceph_msg_dump(struct ceph_msg *msg)
{
pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
msg->front_alloc_len, msg->data_length);
print_hex_dump(KERN_DEBUG, "header: ",
DUMP_PREFIX_OFFSET, 16, 1,
&msg->hdr, sizeof(msg->hdr), true);
print_hex_dump(KERN_DEBUG, " front: ",
DUMP_PREFIX_OFFSET, 16, 1,
msg->front.iov_base, msg->front.iov_len, true);
if (msg->middle)
print_hex_dump(KERN_DEBUG, "middle: ",
DUMP_PREFIX_OFFSET, 16, 1,
msg->middle->vec.iov_base,
msg->middle->vec.iov_len, true);
print_hex_dump(KERN_DEBUG, "footer: ",
DUMP_PREFIX_OFFSET, 16, 1,
&msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
| linux-master | net/ceph/messenger.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* snapshot.c Ceph snapshot context utility routines (part of libceph)
*
* Copyright (C) 2013 Inktank Storage, Inc.
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/ceph/libceph.h>
/*
* Ceph snapshot contexts are reference counted objects, and the
* returned structure holds a single reference. Acquire additional
* references with ceph_get_snap_context(), and release them with
* ceph_put_snap_context(). When the reference count reaches zero
* the entire structure is freed.
*/
/*
* Create a new ceph snapshot context large enough to hold the
* indicated number of snapshot ids (which can be 0). Caller has
* to fill in snapc->seq and snapc->snaps[0..snap_count-1].
*
* Returns a null pointer if an error occurs.
*/
struct ceph_snap_context *ceph_create_snap_context(u32 snap_count,
gfp_t gfp_flags)
{
struct ceph_snap_context *snapc;
size_t size;
size = sizeof (struct ceph_snap_context);
size += snap_count * sizeof (snapc->snaps[0]);
snapc = kzalloc(size, gfp_flags);
if (!snapc)
return NULL;
refcount_set(&snapc->nref, 1);
snapc->num_snaps = snap_count;
return snapc;
}
EXPORT_SYMBOL(ceph_create_snap_context);
struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc)
{
if (sc)
refcount_inc(&sc->nref);
return sc;
}
EXPORT_SYMBOL(ceph_get_snap_context);
void ceph_put_snap_context(struct ceph_snap_context *sc)
{
if (!sc)
return;
if (refcount_dec_and_test(&sc->nref)) {
/*printk(" deleting snap_context %p\n", sc);*/
kfree(sc);
}
}
EXPORT_SYMBOL(ceph_put_snap_context);
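/*
* Illustrative usage sketch; the sequence and snapshot ids are assumed
* values, not taken from a real caller.
*
*	struct ceph_snap_context *snapc;
*
*	snapc = ceph_create_snap_context(2, GFP_KERNEL);
*	if (!snapc)
*		return -ENOMEM;
*	snapc->seq = 10;	(latest snapshot sequence number)
*	snapc->snaps[0] = 10;	(snapshot ids, newest first)
*	snapc->snaps[1] = 7;
*	...
*	ceph_put_snap_context(snapc);	(freed once the last ref is dropped)
*/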
| linux-master | net/ceph/snapshot.c |
#include <linux/ceph/types.h>
#include <linux/module.h>
/*
* Robert Jenkin's hash function.
* https://burtleburtle.net/bob/hash/evahash.html
* This is in the public domain.
*/
#define mix(a, b, c) \
do { \
a = a - b; a = a - c; a = a ^ (c >> 13); \
b = b - c; b = b - a; b = b ^ (a << 8); \
c = c - a; c = c - b; c = c ^ (b >> 13); \
a = a - b; a = a - c; a = a ^ (c >> 12); \
b = b - c; b = b - a; b = b ^ (a << 16); \
c = c - a; c = c - b; c = c ^ (b >> 5); \
a = a - b; a = a - c; a = a ^ (c >> 3); \
b = b - c; b = b - a; b = b ^ (a << 10); \
c = c - a; c = c - b; c = c ^ (b >> 15); \
} while (0)
unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
{
const unsigned char *k = (const unsigned char *)str;
__u32 a, b, c; /* the internal state */
__u32 len; /* how many key bytes still need mixing */
/* Set up the internal state */
len = length;
a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
b = a;
c = 0; /* variable initialization of internal state */
/* handle most of the key */
while (len >= 12) {
a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) +
((__u32)k[3] << 24));
b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) +
((__u32)k[7] << 24));
c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) +
((__u32)k[11] << 24));
mix(a, b, c);
k = k + 12;
len = len - 12;
}
/* handle the last 11 bytes */
c = c + length;
switch (len) {
case 11:
c = c + ((__u32)k[10] << 24);
fallthrough;
case 10:
c = c + ((__u32)k[9] << 16);
fallthrough;
case 9:
c = c + ((__u32)k[8] << 8);
/* the first byte of c is reserved for the length */
fallthrough;
case 8:
b = b + ((__u32)k[7] << 24);
fallthrough;
case 7:
b = b + ((__u32)k[6] << 16);
fallthrough;
case 6:
b = b + ((__u32)k[5] << 8);
fallthrough;
case 5:
b = b + k[4];
fallthrough;
case 4:
a = a + ((__u32)k[3] << 24);
fallthrough;
case 3:
a = a + ((__u32)k[2] << 16);
fallthrough;
case 2:
a = a + ((__u32)k[1] << 8);
fallthrough;
case 1:
a = a + k[0];
/* case 0: nothing left to add */
}
mix(a, b, c);
return c;
}
/*
* linux dcache hash
*/
unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
{
unsigned long hash = 0;
unsigned char c;
while (length--) {
c = *str++;
hash = (hash + (c << 4) + (c >> 4)) * 11;
}
return hash;
}
unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
{
switch (type) {
case CEPH_STR_HASH_LINUX:
return ceph_str_hash_linux(s, len);
case CEPH_STR_HASH_RJENKINS:
return ceph_str_hash_rjenkins(s, len);
default:
return -1;
}
}
EXPORT_SYMBOL(ceph_str_hash);
const char *ceph_str_hash_name(int type)
{
switch (type) {
case CEPH_STR_HASH_LINUX:
return "linux";
case CEPH_STR_HASH_RJENKINS:
return "rjenkins";
default:
return "unknown";
}
}
EXPORT_SYMBOL(ceph_str_hash_name);
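/*
* Illustrative sketch: hashing an object name with an explicit hash type.
* The name below is an assumed example, not a real object.
*
*	const char *name = "rbd_header.abc123";
*	unsigned int hash;
*
*	hash = ceph_str_hash(CEPH_STR_HASH_RJENKINS, name, strlen(name));
*
* CEPH_STR_HASH_LINUX selects ceph_str_hash_linux() instead; an unknown
* type makes ceph_str_hash() return (unsigned int)-1.
*/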
| linux-master | net/ceph/ceph_hash.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags)
{
struct ceph_pagelist *pl;
pl = kmalloc(sizeof(*pl), gfp_flags);
if (!pl)
return NULL;
INIT_LIST_HEAD(&pl->head);
pl->mapped_tail = NULL;
pl->length = 0;
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
refcount_set(&pl->refcnt, 1);
return pl;
}
EXPORT_SYMBOL(ceph_pagelist_alloc);
static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
if (pl->mapped_tail) {
struct page *page = list_entry(pl->head.prev, struct page, lru);
kunmap(page);
pl->mapped_tail = NULL;
}
}
void ceph_pagelist_release(struct ceph_pagelist *pl)
{
if (!refcount_dec_and_test(&pl->refcnt))
return;
ceph_pagelist_unmap_tail(pl);
while (!list_empty(&pl->head)) {
struct page *page = list_first_entry(&pl->head, struct page,
lru);
list_del(&page->lru);
__free_page(page);
}
ceph_pagelist_free_reserve(pl);
kfree(pl);
}
EXPORT_SYMBOL(ceph_pagelist_release);
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
struct page *page;
if (!pl->num_pages_free) {
page = __page_cache_alloc(GFP_NOFS);
} else {
page = list_first_entry(&pl->free_list, struct page, lru);
list_del(&page->lru);
--pl->num_pages_free;
}
if (!page)
return -ENOMEM;
pl->room += PAGE_SIZE;
ceph_pagelist_unmap_tail(pl);
list_add_tail(&page->lru, &pl->head);
pl->mapped_tail = kmap(page);
return 0;
}
int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
{
while (pl->room < len) {
size_t bit = pl->room;
int ret;
memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
buf, bit);
pl->length += bit;
pl->room -= bit;
buf += bit;
len -= bit;
ret = ceph_pagelist_addpage(pl);
if (ret)
return ret;
}
memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
pl->length += len;
pl->room -= len;
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_append);
/* Preallocate enough pages so that appending the given amount of data
* to the pagelist later will not require any further allocation.
* Returns: 0 on success, -ENOMEM on error.
*/
int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
{
if (space <= pl->room)
return 0;
space -= pl->room;
space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */
while (space > pl->num_pages_free) {
struct page *page = __page_cache_alloc(GFP_NOFS);
if (!page)
return -ENOMEM;
list_add_tail(&page->lru, &pl->free_list);
++pl->num_pages_free;
}
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_reserve);
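/*
* Illustrative sketch of the reserve-then-append pattern, for a caller
* that must not allocate while appending (e.g. while holding a lock).
* "buf" and "total_len" are assumptions made for the example.
*
*	struct ceph_pagelist *pl;
*	int ret;
*
*	pl = ceph_pagelist_alloc(GFP_NOFS);
*	if (!pl)
*		return -ENOMEM;
*	ret = ceph_pagelist_reserve(pl, total_len);
*	if (ret)
*		goto out_release;
*	ret = ceph_pagelist_append(pl, buf, total_len);	(uses reserved pages)
*	...
* out_release:
*	ceph_pagelist_release(pl);
*/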
/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
while (!list_empty(&pl->free_list)) {
struct page *page = list_first_entry(&pl->free_list,
struct page, lru);
list_del(&page->lru);
__free_page(page);
--pl->num_pages_free;
}
BUG_ON(pl->num_pages_free);
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
c->pl = pl;
c->page_lru = pl->head.prev;
c->room = pl->room;
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);
/* Truncate a pagelist to the given point. Move extra pages to reserve.
* This won't sleep.
* Returns: 0 on success,
* -EINVAL if the pagelist doesn't match the trunc point pagelist
*/
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
struct page *page;
if (pl != c->pl)
return -EINVAL;
ceph_pagelist_unmap_tail(pl);
while (pl->head.prev != c->page_lru) {
page = list_entry(pl->head.prev, struct page, lru);
/* move from pagelist to reserve */
list_move_tail(&page->lru, &pl->free_list);
++pl->num_pages_free;
}
pl->room = c->room;
if (!list_empty(&pl->head)) {
page = list_entry(pl->head.prev, struct page, lru);
pl->mapped_tail = kmap(page);
}
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);
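/*
* Illustrative sketch of the cursor API: record a truncation point,
* append, and roll back if a later step fails.  encode_rest() is a
* made-up follow-up step, not a real function.
*
*	struct ceph_pagelist_cursor trunc;
*
*	ceph_pagelist_set_cursor(pl, &trunc);
*	ret = ceph_pagelist_append(pl, extra, extra_len);
*	if (!ret)
*		ret = encode_rest(pl);
*	if (ret)
*		ceph_pagelist_truncate(pl, &trunc);	(undoes the append)
*/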
| linux-master | net/ceph/pagelist.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
/**
* ceph_cls_lock - grab rados lock for object
* @osdc: OSD client instance
* @oid: object to lock
* @oloc: object locator for @oid
* @lock_name: the name of the lock
* @type: lock type (CEPH_CLS_LOCK_EXCLUSIVE or CEPH_CLS_LOCK_SHARED)
* @cookie: user-defined identifier for this instance of the lock
* @tag: user-defined tag
* @desc: user-defined lock description
* @flags: lock flags
*
* All operations on the same lock should use the same tag.
*/
int ceph_cls_lock(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
char *lock_name, u8 type, char *cookie,
char *tag, char *desc, u8 flags)
{
int lock_op_buf_size;
int name_len = strlen(lock_name);
int cookie_len = strlen(cookie);
int tag_len = strlen(tag);
int desc_len = strlen(desc);
void *p, *end;
struct page *lock_op_page;
struct timespec64 mtime;
int ret;
lock_op_buf_size = name_len + sizeof(__le32) +
cookie_len + sizeof(__le32) +
tag_len + sizeof(__le32) +
desc_len + sizeof(__le32) +
sizeof(struct ceph_timespec) +
/* flag and type */
sizeof(u8) + sizeof(u8) +
CEPH_ENCODING_START_BLK_LEN;
if (lock_op_buf_size > PAGE_SIZE)
return -E2BIG;
lock_op_page = alloc_page(GFP_NOIO);
if (!lock_op_page)
return -ENOMEM;
p = page_address(lock_op_page);
end = p + lock_op_buf_size;
/* encode cls_lock_lock_op struct */
ceph_start_encoding(&p, 1, 1,
lock_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
ceph_encode_8(&p, type);
ceph_encode_string(&p, end, cookie, cookie_len);
ceph_encode_string(&p, end, tag, tag_len);
ceph_encode_string(&p, end, desc, desc_len);
/* only support infinite duration */
memset(&mtime, 0, sizeof(mtime));
ceph_encode_timespec64(p, &mtime);
p += sizeof(struct ceph_timespec);
ceph_encode_8(&p, flags);
dout("%s lock_name %s type %d cookie %s tag %s desc %s flags 0x%x\n",
__func__, lock_name, type, cookie, tag, desc, flags);
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "lock",
CEPH_OSD_FLAG_WRITE, lock_op_page,
lock_op_buf_size, NULL, NULL);
dout("%s: status %d\n", __func__, ret);
__free_page(lock_op_page);
return ret;
}
EXPORT_SYMBOL(ceph_cls_lock);
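/*
* Illustrative sketch (osdc, oid and oloc come from the caller and are
* assumptions here): take an exclusive lock with an empty tag and
* description, then release it with the same cookie.
*
*	ret = ceph_cls_lock(osdc, &oid, &oloc, "my_lock",
*			    CEPH_CLS_LOCK_EXCLUSIVE, "cookie-1", "", "", 0);
*	if (ret)
*		return ret;
*	...
*	ret = ceph_cls_unlock(osdc, &oid, &oloc, "my_lock", "cookie-1");
*/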
/**
* ceph_cls_unlock - release rados lock for object
* @osdc: OSD client instance
* @oid: object to lock
* @oloc: object locator for @oid
* @lock_name: the name of the lock
* @cookie: user-defined identifier for this instance of the lock
*/
int ceph_cls_unlock(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
char *lock_name, char *cookie)
{
int unlock_op_buf_size;
int name_len = strlen(lock_name);
int cookie_len = strlen(cookie);
void *p, *end;
struct page *unlock_op_page;
int ret;
unlock_op_buf_size = name_len + sizeof(__le32) +
cookie_len + sizeof(__le32) +
CEPH_ENCODING_START_BLK_LEN;
if (unlock_op_buf_size > PAGE_SIZE)
return -E2BIG;
unlock_op_page = alloc_page(GFP_NOIO);
if (!unlock_op_page)
return -ENOMEM;
p = page_address(unlock_op_page);
end = p + unlock_op_buf_size;
/* encode cls_lock_unlock_op struct */
ceph_start_encoding(&p, 1, 1,
unlock_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
ceph_encode_string(&p, end, cookie, cookie_len);
dout("%s lock_name %s cookie %s\n", __func__, lock_name, cookie);
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "unlock",
CEPH_OSD_FLAG_WRITE, unlock_op_page,
unlock_op_buf_size, NULL, NULL);
dout("%s: status %d\n", __func__, ret);
__free_page(unlock_op_page);
return ret;
}
EXPORT_SYMBOL(ceph_cls_unlock);
/**
* ceph_cls_break_lock - release rados lock for object for specified client
* @osdc: OSD client instance
* @oid: object to lock
* @oloc: object locator for @oid
* @lock_name: the name of the lock
* @cookie: user-defined identifier for this instance of the lock
* @locker: current lock owner
*/
int ceph_cls_break_lock(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
char *lock_name, char *cookie,
struct ceph_entity_name *locker)
{
int break_op_buf_size;
int name_len = strlen(lock_name);
int cookie_len = strlen(cookie);
struct page *break_op_page;
void *p, *end;
int ret;
break_op_buf_size = name_len + sizeof(__le32) +
cookie_len + sizeof(__le32) +
sizeof(u8) + sizeof(__le64) +
CEPH_ENCODING_START_BLK_LEN;
if (break_op_buf_size > PAGE_SIZE)
return -E2BIG;
break_op_page = alloc_page(GFP_NOIO);
if (!break_op_page)
return -ENOMEM;
p = page_address(break_op_page);
end = p + break_op_buf_size;
/* encode cls_lock_break_op struct */
ceph_start_encoding(&p, 1, 1,
break_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
ceph_encode_copy(&p, locker, sizeof(*locker));
ceph_encode_string(&p, end, cookie, cookie_len);
dout("%s lock_name %s cookie %s locker %s%llu\n", __func__, lock_name,
cookie, ENTITY_NAME(*locker));
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "break_lock",
CEPH_OSD_FLAG_WRITE, break_op_page,
break_op_buf_size, NULL, NULL);
dout("%s: status %d\n", __func__, ret);
__free_page(break_op_page);
return ret;
}
EXPORT_SYMBOL(ceph_cls_break_lock);
int ceph_cls_set_cookie(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
char *lock_name, u8 type, char *old_cookie,
char *tag, char *new_cookie)
{
int cookie_op_buf_size;
int name_len = strlen(lock_name);
int old_cookie_len = strlen(old_cookie);
int tag_len = strlen(tag);
int new_cookie_len = strlen(new_cookie);
void *p, *end;
struct page *cookie_op_page;
int ret;
cookie_op_buf_size = name_len + sizeof(__le32) +
old_cookie_len + sizeof(__le32) +
tag_len + sizeof(__le32) +
new_cookie_len + sizeof(__le32) +
sizeof(u8) + CEPH_ENCODING_START_BLK_LEN;
if (cookie_op_buf_size > PAGE_SIZE)
return -E2BIG;
cookie_op_page = alloc_page(GFP_NOIO);
if (!cookie_op_page)
return -ENOMEM;
p = page_address(cookie_op_page);
end = p + cookie_op_buf_size;
/* encode cls_lock_set_cookie_op struct */
ceph_start_encoding(&p, 1, 1,
cookie_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
ceph_encode_8(&p, type);
ceph_encode_string(&p, end, old_cookie, old_cookie_len);
ceph_encode_string(&p, end, tag, tag_len);
ceph_encode_string(&p, end, new_cookie, new_cookie_len);
dout("%s lock_name %s type %d old_cookie %s tag %s new_cookie %s\n",
__func__, lock_name, type, old_cookie, tag, new_cookie);
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "set_cookie",
CEPH_OSD_FLAG_WRITE, cookie_op_page,
cookie_op_buf_size, NULL, NULL);
dout("%s: status %d\n", __func__, ret);
__free_page(cookie_op_page);
return ret;
}
EXPORT_SYMBOL(ceph_cls_set_cookie);
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers)
{
int i;
for (i = 0; i < num_lockers; i++)
kfree(lockers[i].id.cookie);
kfree(lockers);
}
EXPORT_SYMBOL(ceph_free_lockers);
static int decode_locker(void **p, void *end, struct ceph_locker *locker)
{
u8 struct_v;
u32 len;
char *s;
int ret;
ret = ceph_start_decoding(p, end, 1, "locker_id_t", &struct_v, &len);
if (ret)
return ret;
ceph_decode_copy(p, &locker->id.name, sizeof(locker->id.name));
s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
if (IS_ERR(s))
return PTR_ERR(s);
locker->id.cookie = s;
ret = ceph_start_decoding(p, end, 1, "locker_info_t", &struct_v, &len);
if (ret)
return ret;
*p += sizeof(struct ceph_timespec); /* skip expiration */
ret = ceph_decode_entity_addr(p, end, &locker->info.addr);
if (ret)
return ret;
len = ceph_decode_32(p);
*p += len; /* skip description */
dout("%s %s%llu cookie %s addr %s\n", __func__,
ENTITY_NAME(locker->id.name), locker->id.cookie,
ceph_pr_addr(&locker->info.addr));
return 0;
}
static int decode_lockers(void **p, void *end, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers)
{
u8 struct_v;
u32 struct_len;
char *s;
int i;
int ret;
ret = ceph_start_decoding(p, end, 1, "cls_lock_get_info_reply",
&struct_v, &struct_len);
if (ret)
return ret;
*num_lockers = ceph_decode_32(p);
*lockers = kcalloc(*num_lockers, sizeof(**lockers), GFP_NOIO);
if (!*lockers)
return -ENOMEM;
for (i = 0; i < *num_lockers; i++) {
ret = decode_locker(p, end, *lockers + i);
if (ret)
goto err_free_lockers;
}
*type = ceph_decode_8(p);
s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto err_free_lockers;
}
*tag = s;
return 0;
err_free_lockers:
ceph_free_lockers(*lockers, *num_lockers);
return ret;
}
/*
* On success, the caller is responsible for:
*
* kfree(tag);
* ceph_free_lockers(lockers, num_lockers);
*/
int ceph_cls_lock_info(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
char *lock_name, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers)
{
int get_info_op_buf_size;
int name_len = strlen(lock_name);
struct page *get_info_op_page, *reply_page;
size_t reply_len = PAGE_SIZE;
void *p, *end;
int ret;
get_info_op_buf_size = name_len + sizeof(__le32) +
CEPH_ENCODING_START_BLK_LEN;
if (get_info_op_buf_size > PAGE_SIZE)
return -E2BIG;
get_info_op_page = alloc_page(GFP_NOIO);
if (!get_info_op_page)
return -ENOMEM;
reply_page = alloc_page(GFP_NOIO);
if (!reply_page) {
__free_page(get_info_op_page);
return -ENOMEM;
}
p = page_address(get_info_op_page);
end = p + get_info_op_buf_size;
/* encode cls_lock_get_info_op struct */
ceph_start_encoding(&p, 1, 1,
get_info_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
dout("%s lock_name %s\n", __func__, lock_name);
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
CEPH_OSD_FLAG_READ, get_info_op_page,
get_info_op_buf_size, &reply_page, &reply_len);
dout("%s: status %d\n", __func__, ret);
if (ret >= 0) {
p = page_address(reply_page);
end = p + reply_len;
ret = decode_lockers(&p, end, type, tag, lockers, num_lockers);
}
__free_page(get_info_op_page);
__free_page(reply_page);
return ret;
}
EXPORT_SYMBOL(ceph_cls_lock_info);
int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
char *lock_name, u8 type, char *cookie, char *tag)
{
int assert_op_buf_size;
int name_len = strlen(lock_name);
int cookie_len = strlen(cookie);
int tag_len = strlen(tag);
struct page **pages;
void *p, *end;
int ret;
assert_op_buf_size = name_len + sizeof(__le32) +
cookie_len + sizeof(__le32) +
tag_len + sizeof(__le32) +
sizeof(u8) + CEPH_ENCODING_START_BLK_LEN;
if (assert_op_buf_size > PAGE_SIZE)
return -E2BIG;
ret = osd_req_op_cls_init(req, which, "lock", "assert_locked");
if (ret)
return ret;
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages))
return PTR_ERR(pages);
p = page_address(pages[0]);
end = p + assert_op_buf_size;
/* encode cls_lock_assert_op struct */
ceph_start_encoding(&p, 1, 1,
assert_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
ceph_encode_string(&p, end, lock_name, name_len);
ceph_encode_8(&p, type);
ceph_encode_string(&p, end, cookie, cookie_len);
ceph_encode_string(&p, end, tag, tag_len);
WARN_ON(p != end);
osd_req_op_cls_request_data_pages(req, which, pages, assert_op_buf_size,
0, false, true);
return 0;
}
EXPORT_SYMBOL(ceph_cls_assert_locked);
| linux-master | net/ceph/cls_lock_client.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/msgpool.h>
static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
{
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg;
msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
gfp_mask, true);
if (!msg) {
dout("msgpool_alloc %s failed\n", pool->name);
} else {
dout("msgpool_alloc %s %p\n", pool->name, msg);
msg->pool = pool;
}
return msg;
}
static void msgpool_free(void *element, void *arg)
{
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg = element;
dout("msgpool_release %s %p\n", pool->name, msg);
msg->pool = NULL;
ceph_msg_put(msg);
}
int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int max_data_items, int size,
const char *name)
{
dout("msgpool %s init\n", name);
pool->type = type;
pool->front_len = front_len;
pool->max_data_items = max_data_items;
pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
if (!pool->pool)
return -ENOMEM;
pool->name = name;
return 0;
}
void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
dout("msgpool %s destroy\n", pool->name);
mempool_destroy(pool->pool);
}
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
int max_data_items)
{
struct ceph_msg *msg;
if (front_len > pool->front_len ||
max_data_items > pool->max_data_items) {
pr_warn_ratelimited("%s need %d/%d, pool %s has %d/%d\n",
__func__, front_len, max_data_items, pool->name,
pool->front_len, pool->max_data_items);
WARN_ON_ONCE(1);
/* try to alloc a fresh message */
return ceph_msg_new2(pool->type, front_len, max_data_items,
GFP_NOFS, false);
}
msg = mempool_alloc(pool->pool, GFP_NOFS);
dout("msgpool_get %s %p\n", pool->name, msg);
return msg;
}
void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
dout("msgpool_put %s %p\n", pool->name, msg);
/* reset msg front_len; user may have changed it */
msg->front.iov_len = pool->front_len;
msg->hdr.front_len = cpu_to_le32(pool->front_len);
msg->data_length = 0;
msg->num_data_items = 0;
kref_init(&msg->kref); /* retake single ref */
mempool_free(msg, pool->pool);
}
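/*
* Illustrative lifecycle sketch; the message type, sizes and pool depth
* are assumed values.  A pool guarantees forward progress for up to
* "size" preallocated messages.
*
*	struct ceph_msgpool pool;
*	struct ceph_msg *msg;
*
*	ret = ceph_msgpool_init(&pool, CEPH_MSG_OSD_OPREPLY, 512, 1, 10,
*				"example-pool");
*	...
*	msg = ceph_msgpool_get(&pool, 512, 1);
*	...	(the final ceph_msg_put() returns msg to the pool,
*		 see ceph_msg_release() in messenger.c)
*	ceph_msgpool_destroy(&pool);
*/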
| linux-master | net/ceph/msgpool.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>
#define OSD_OPREPLY_FRONT_LEN 512
static struct kmem_cache *ceph_osd_request_cache;
static const struct ceph_connection_operations osd_con_ops;
/*
* Implement client access to distributed object storage cluster.
*
* All data objects are stored within a cluster/cloud of OSDs, or
* "object storage devices." (Note that Ceph OSDs have _nothing_ to
* do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
* remote daemons serving up and coordinating consistent and safe
* access to storage.
*
* Cluster membership and the mapping of data objects onto storage devices
* are described by the osd map.
*
* We keep track of pending OSD requests (read, write), resubmit
* requests to different OSDs when the cluster topology/data layout
* changes, or retry the affected requests when the communications
* channel with an OSD is reset.
*/
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
bool wrlocked = true;
if (unlikely(down_read_trylock(sem))) {
wrlocked = false;
up_read(sem);
}
return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
struct ceph_osd_client *osdc = osd->o_osdc;
WARN_ON(!(mutex_is_locked(&osd->lock) &&
rwsem_is_locked(&osdc->lock)) &&
!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
* calculate the mapping of a file extent onto an object, and fill out the
* request accordingly. shorten extent as necessary if it crosses an
* object boundary.
*
* fill osd op in request message.
*/
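/*
* Worked example, assuming the default striping where stripe_unit ==
* object_size == 4M and stripe_count == 1: a request for off=6M,
* *plen=4M maps to objnum=1, objoff=2M, and is shortened at the object
* boundary to objlen = *plen = 2M.
*/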
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
u64 *objnum, u64 *objoff, u64 *objlen)
{
u64 orig_len = *plen;
u32 xlen;
/* object extent? */
ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
objoff, &xlen);
*objlen = xlen;
if (*objlen < orig_len) {
*plen = *objlen;
dout(" skipping last %llu, final file extent %llu~%llu\n",
orig_len - *plen, off, *plen);
}
dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
return 0;
}
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
memset(osd_data, 0, sizeof (*osd_data));
osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}
/*
* Consumes @pages if @own_pages is true.
*/
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
struct page **pages, u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
osd_data->pages = pages;
osd_data->length = length;
osd_data->alignment = alignment;
osd_data->pages_from_pool = pages_from_pool;
osd_data->own_pages = own_pages;
}
/*
* Consumes a ref on @pagelist.
*/
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
struct ceph_pagelist *pagelist)
{
osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
osd_data->pagelist = pagelist;
}
#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
struct ceph_bio_iter *bio_pos,
u32 bio_length)
{
osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
osd_data->bio_pos = *bio_pos;
osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */
static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
struct ceph_bvec_iter *bvec_pos,
u32 num_bvecs)
{
osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
osd_data->bvec_pos = *bvec_pos;
osd_data->num_bvecs = num_bvecs;
}
static void ceph_osd_iter_init(struct ceph_osd_data *osd_data,
struct iov_iter *iter)
{
osd_data->type = CEPH_OSD_DATA_TYPE_ITER;
osd_data->iter = *iter;
}
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
BUG_ON(which >= osd_req->r_num_ops);
return &osd_req->r_ops[which].raw_data_in;
}
struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
unsigned int which)
{
return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_raw_data_in(osd_req, which);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bio_iter *bio_pos,
u32 bio_length)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
unsigned int which,
struct bio_vec *bvecs, u32 num_bvecs,
u32 bytes)
{
struct ceph_osd_data *osd_data;
struct ceph_bvec_iter it = {
.bvecs = bvecs,
.iter = { .bi_size = bytes },
};
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
/**
* osd_req_op_extent_osd_iter - Set up an operation with an iterator buffer
* @osd_req: The request to set up
* @which: Index of the operation in which to set the iter
* @iter: The buffer iterator
*/
void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
unsigned int which, struct iov_iter *iter)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_iter_init(osd_data, iter);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_iter);
static void osd_req_op_cls_request_info_pagelist(
struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_info);
ceph_osd_data_pagelist_init(osd_data, pagelist);
}
void osd_req_op_cls_request_data_pagelist(
struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pagelist_init(osd_data, pagelist);
osd_req->r_ops[which].cls.indata_len += pagelist->length;
osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages, u64 length,
u32 alignment, bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
osd_req->r_ops[which].cls.indata_len += length;
osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
unsigned int which,
struct bio_vec *bvecs, u32 num_bvecs,
u32 bytes)
{
struct ceph_osd_data *osd_data;
struct ceph_bvec_iter it = {
.bvecs = bvecs,
.iter = { .bi_size = bytes },
};
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
osd_req->r_ops[which].cls.indata_len += bytes;
osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages, u64 length,
u32 alignment, bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, response_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
switch (osd_data->type) {
case CEPH_OSD_DATA_TYPE_NONE:
return 0;
case CEPH_OSD_DATA_TYPE_PAGES:
return osd_data->length;
case CEPH_OSD_DATA_TYPE_PAGELIST:
return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
case CEPH_OSD_DATA_TYPE_BIO:
return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
case CEPH_OSD_DATA_TYPE_BVECS:
return osd_data->bvec_pos.iter.bi_size;
case CEPH_OSD_DATA_TYPE_ITER:
return iov_iter_count(&osd_data->iter);
default:
WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
return 0;
}
}
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
int num_pages;
num_pages = calc_pages_for((u64)osd_data->alignment,
(u64)osd_data->length);
ceph_release_page_vector(osd_data->pages, num_pages);
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
ceph_pagelist_release(osd_data->pagelist);
}
ceph_osd_data_init(osd_data);
}
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
unsigned int which)
{
struct ceph_osd_req_op *op;
BUG_ON(which >= osd_req->r_num_ops);
op = &osd_req->r_ops[which];
switch (op->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
kfree(op->extent.sparse_ext);
ceph_osd_data_release(&op->extent.osd_data);
break;
case CEPH_OSD_OP_CALL:
ceph_osd_data_release(&op->cls.request_info);
ceph_osd_data_release(&op->cls.request_data);
ceph_osd_data_release(&op->cls.response_data);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
ceph_osd_data_release(&op->xattr.osd_data);
break;
case CEPH_OSD_OP_STAT:
ceph_osd_data_release(&op->raw_data_in);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
ceph_osd_data_release(&op->notify_ack.request_data);
break;
case CEPH_OSD_OP_NOTIFY:
ceph_osd_data_release(&op->notify.request_data);
ceph_osd_data_release(&op->notify.response_data);
break;
case CEPH_OSD_OP_LIST_WATCHERS:
ceph_osd_data_release(&op->list_watchers.response_data);
break;
case CEPH_OSD_OP_COPY_FROM2:
ceph_osd_data_release(&op->copy_from.osd_data);
break;
default:
break;
}
}
/*
* Assumes @t is zero-initialized.
*/
static void target_init(struct ceph_osd_request_target *t)
{
ceph_oid_init(&t->base_oid);
ceph_oloc_init(&t->base_oloc);
ceph_oid_init(&t->target_oid);
ceph_oloc_init(&t->target_oloc);
ceph_osds_init(&t->acting);
ceph_osds_init(&t->up);
t->size = -1;
t->min_size = -1;
t->osd = CEPH_HOMELESS_OSD;
}
static void target_copy(struct ceph_osd_request_target *dest,
const struct ceph_osd_request_target *src)
{
ceph_oid_copy(&dest->base_oid, &src->base_oid);
ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
ceph_oid_copy(&dest->target_oid, &src->target_oid);
ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
dest->pgid = src->pgid; /* struct */
dest->spgid = src->spgid; /* struct */
dest->pg_num = src->pg_num;
dest->pg_num_mask = src->pg_num_mask;
ceph_osds_copy(&dest->acting, &src->acting);
ceph_osds_copy(&dest->up, &src->up);
dest->size = src->size;
dest->min_size = src->min_size;
dest->sort_bitwise = src->sort_bitwise;
dest->recovery_deletes = src->recovery_deletes;
dest->flags = src->flags;
dest->used_replica = src->used_replica;
dest->paused = src->paused;
dest->epoch = src->epoch;
dest->last_force_resend = src->last_force_resend;
dest->osd = src->osd;
}
static void target_destroy(struct ceph_osd_request_target *t)
{
ceph_oid_destroy(&t->base_oid);
ceph_oloc_destroy(&t->base_oloc);
ceph_oid_destroy(&t->target_oid);
ceph_oloc_destroy(&t->target_oloc);
}
/*
* requests
*/
static void request_release_checks(struct ceph_osd_request *req)
{
WARN_ON(!RB_EMPTY_NODE(&req->r_node));
WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
WARN_ON(!list_empty(&req->r_private_item));
WARN_ON(req->r_osd);
}
static void ceph_osdc_release_request(struct kref *kref)
{
struct ceph_osd_request *req = container_of(kref,
struct ceph_osd_request, r_kref);
unsigned int which;
dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
req->r_request, req->r_reply);
request_release_checks(req);
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_reply)
ceph_msg_put(req->r_reply);
for (which = 0; which < req->r_num_ops; which++)
osd_req_op_data_release(req, which);
target_destroy(&req->r_t);
ceph_put_snap_context(req->r_snapc);
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
kmem_cache_free(ceph_osd_request_cache, req);
else
kfree(req);
}
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
dout("%s %p (was %d)\n", __func__, req,
kref_read(&req->r_kref));
kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
if (req) {
dout("%s %p (was %d)\n", __func__, req,
kref_read(&req->r_kref));
kref_put(&req->r_kref, ceph_osdc_release_request);
}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
/* req only, each op is zeroed in osd_req_op_init() */
memset(req, 0, sizeof(*req));
kref_init(&req->r_kref);
init_completion(&req->r_completion);
RB_CLEAR_NODE(&req->r_node);
RB_CLEAR_NODE(&req->r_mc_node);
INIT_LIST_HEAD(&req->r_private_item);
target_init(&req->r_t);
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
unsigned int num_ops,
bool use_mempool,
gfp_t gfp_flags)
{
struct ceph_osd_request *req;
if (use_mempool) {
BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
req = mempool_alloc(osdc->req_mempool, gfp_flags);
} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
} else {
BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
}
if (unlikely(!req))
return NULL;
request_init(req);
req->r_osdc = osdc;
req->r_mempool = use_mempool;
req->r_num_ops = num_ops;
req->r_snapid = CEPH_NOSNAP;
req->r_snapc = ceph_get_snap_context(snapc);
dout("%s req %p\n", __func__, req);
return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
int num_request_data_items,
int num_reply_data_items)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_msg *msg;
int msg_size;
WARN_ON(req->r_request || req->r_reply);
WARN_ON(ceph_oid_empty(&req->r_base_oid));
WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
/* create request message */
msg_size = CEPH_ENCODING_START_BLK_LEN +
CEPH_PGID_ENCODING_LEN + 1; /* spgid */
msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
msg_size += CEPH_ENCODING_START_BLK_LEN +
sizeof(struct ceph_osd_reqid); /* reqid */
msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
msg_size += CEPH_ENCODING_START_BLK_LEN +
ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
msg_size += 4 + req->r_base_oid.name_len; /* oid */
msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
msg_size += 8; /* snapid */
msg_size += 8; /* snap_seq */
msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
msg_size += 4 + 8; /* retry_attempt, features */
if (req->r_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
num_request_data_items);
else
msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
num_request_data_items, gfp, true);
if (!msg)
return -ENOMEM;
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
/* create reply message */
msg_size = OSD_OPREPLY_FRONT_LEN;
msg_size += req->r_base_oid.name_len;
msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
if (req->r_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
num_reply_data_items);
else
msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
num_reply_data_items, gfp, true);
if (!msg)
return -ENOMEM;
req->r_reply = msg;
return 0;
}
static bool osd_req_opcode_valid(u16 opcode)
{
switch (opcode) {
#define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
default:
return false;
}
}
static void get_num_data_items(struct ceph_osd_request *req,
int *num_request_data_items,
int *num_reply_data_items)
{
struct ceph_osd_req_op *op;
*num_request_data_items = 0;
*num_reply_data_items = 0;
for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
switch (op->op) {
/* request */
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_COPY_FROM2:
*num_request_data_items += 1;
break;
/* reply */
case CEPH_OSD_OP_STAT:
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_LIST_WATCHERS:
*num_reply_data_items += 1;
break;
/* both */
case CEPH_OSD_OP_NOTIFY:
*num_request_data_items += 1;
*num_reply_data_items += 1;
break;
case CEPH_OSD_OP_CALL:
*num_request_data_items += 2;
*num_reply_data_items += 1;
break;
default:
WARN_ON(!osd_req_opcode_valid(op->op));
break;
}
}
}
/*
* oid, oloc and OSD op opcode(s) must be filled in before this function
* is called.
*/
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
int num_request_data_items, num_reply_data_items;
get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
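/*
* Illustrative ordering sketch for the requirement above.  The pool id,
* object name and length are assumptions; ceph_oid_printf() is provided
* elsewhere (osdmap.c).
*
*	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
*	if (!req)
*		return -ENOMEM;
*	ceph_oid_printf(&req->r_base_oid, "%s", "example-object");
*	req->r_base_oloc.pool = pool_id;
*	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, len, 0, 0);
*	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
*/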
/*
* This is an osd op init function for opcodes that have no data or
* other information associated with them. It also serves as a
* common init routine for all the other init functions, below.
*/
struct ceph_osd_req_op *
osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, u32 flags)
{
struct ceph_osd_req_op *op;
BUG_ON(which >= osd_req->r_num_ops);
BUG_ON(!osd_req_opcode_valid(opcode));
op = &osd_req->r_ops[which];
memset(op, 0, sizeof (*op));
op->op = opcode;
op->flags = flags;
return op;
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 offset, u64 length,
u64 truncate_size, u32 truncate_seq)
{
struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
opcode, 0);
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ);
op->extent.offset = offset;
op->extent.length = length;
op->extent.truncate_size = truncate_size;
op->extent.truncate_seq = truncate_seq;
if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
payload_len += length;
op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
unsigned int which, u64 length)
{
struct ceph_osd_req_op *op;
u64 previous;
BUG_ON(which >= osd_req->r_num_ops);
op = &osd_req->r_ops[which];
previous = op->extent.length;
if (length == previous)
return; /* Nothing to do */
BUG_ON(length > previous);
op->extent.length = length;
if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
unsigned int which, u64 offset_inc)
{
struct ceph_osd_req_op *op, *prev_op;
BUG_ON(which + 1 >= osd_req->r_num_ops);
prev_op = &osd_req->r_ops[which];
op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
/* dup previous one */
op->indata_len = prev_op->indata_len;
op->outdata_len = prev_op->outdata_len;
op->extent = prev_op->extent;
/* adjust offset */
op->extent.offset += offset_inc;
op->extent.length -= offset_inc;
if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
const char *class, const char *method)
{
struct ceph_osd_req_op *op;
struct ceph_pagelist *pagelist;
size_t payload_len = 0;
size_t size;
int ret;
op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
op->cls.class_name = class;
size = strlen(class);
BUG_ON(size > (size_t) U8_MAX);
op->cls.class_len = size;
ret = ceph_pagelist_append(pagelist, class, size);
if (ret)
goto err_pagelist_free;
payload_len += size;
op->cls.method_name = method;
size = strlen(method);
BUG_ON(size > (size_t) U8_MAX);
op->cls.method_len = size;
ret = ceph_pagelist_append(pagelist, method, size);
if (ret)
goto err_pagelist_free;
payload_len += size;
osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
op->indata_len = payload_len;
return 0;
err_pagelist_free:
ceph_pagelist_release(pagelist);
return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode)
{
struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
opcode, 0);
struct ceph_pagelist *pagelist;
size_t payload_len;
int ret;
BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
payload_len = strlen(name);
op->xattr.name_len = payload_len;
ret = ceph_pagelist_append(pagelist, name, payload_len);
if (ret)
goto err_pagelist_free;
op->xattr.value_len = size;
ret = ceph_pagelist_append(pagelist, value, size);
if (ret)
goto err_pagelist_free;
payload_len += size;
op->xattr.cmp_op = cmp_op;
op->xattr.cmp_mode = cmp_mode;
ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
op->indata_len = payload_len;
return 0;
err_pagelist_free:
ceph_pagelist_release(pagelist);
return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
* @watch_opcode: CEPH_OSD_WATCH_OP_*
*/
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
u8 watch_opcode, u64 cookie, u32 gen)
{
struct ceph_osd_req_op *op;
op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
op->watch.cookie = cookie;
op->watch.op = watch_opcode;
op->watch.gen = gen;
}
/*
* prot_ver, timeout and notify payload (may be empty) should already be
* encoded in @request_pl
*/
static void osd_req_op_notify_init(struct ceph_osd_request *req, int which,
u64 cookie, struct ceph_pagelist *request_pl)
{
struct ceph_osd_req_op *op;
op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
op->notify.cookie = cookie;
ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl);
op->indata_len = request_pl->length;
}
/*
* @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
*/
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
u64 expected_write_size,
u32 flags)
{
struct ceph_osd_req_op *op;
op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0);
op->alloc_hint.expected_object_size = expected_object_size;
op->alloc_hint.expected_write_size = expected_write_size;
op->alloc_hint.flags = flags;
/*
* CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
* not worth a feature bit. Set FAILOK per-op flag to make
* sure older osds don't trip over an unsupported opcode.
*/
op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
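/*
 * Attach the data described by @osd_data to @msg, dispatching on the
 * osd_data type.  CEPH_OSD_DATA_TYPE_NONE means the op carries no data
 * and is silently ignored.
 */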
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
struct ceph_osd_data *osd_data)
{
u64 length = ceph_osd_data_length(osd_data);
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
BUG_ON(length > (u64) SIZE_MAX);
if (length)
ceph_msg_data_add_pages(msg, osd_data->pages,
length, osd_data->alignment, false);
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
BUG_ON(!length);
ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_ITER) {
ceph_msg_data_add_iter(msg, &osd_data->iter);
} else {
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
}
}
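/*
 * Encode a single op into its wire format (little endian), returning
 * the op's indata_len so the caller can accumulate hdr.data_len.
 * Unknown opcodes trigger a WARN and encode nothing.
 */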
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
const struct ceph_osd_req_op *src)
{
switch (src->op) {
case CEPH_OSD_OP_STAT:
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_ZERO:
case CEPH_OSD_OP_TRUNCATE:
dst->extent.offset = cpu_to_le64(src->extent.offset);
dst->extent.length = cpu_to_le64(src->extent.length);
dst->extent.truncate_size =
cpu_to_le64(src->extent.truncate_size);
dst->extent.truncate_seq =
cpu_to_le32(src->extent.truncate_seq);
break;
case CEPH_OSD_OP_CALL:
dst->cls.class_len = src->cls.class_len;
dst->cls.method_len = src->cls.method_len;
dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
break;
case CEPH_OSD_OP_WATCH:
dst->watch.cookie = cpu_to_le64(src->watch.cookie);
dst->watch.ver = cpu_to_le64(0);
dst->watch.op = src->watch.op;
dst->watch.gen = cpu_to_le32(src->watch.gen);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
break;
case CEPH_OSD_OP_NOTIFY:
dst->notify.cookie = cpu_to_le64(src->notify.cookie);
break;
case CEPH_OSD_OP_LIST_WATCHERS:
break;
case CEPH_OSD_OP_SETALLOCHINT:
dst->alloc_hint.expected_object_size =
cpu_to_le64(src->alloc_hint.expected_object_size);
dst->alloc_hint.expected_write_size =
cpu_to_le64(src->alloc_hint.expected_write_size);
dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
dst->xattr.cmp_op = src->xattr.cmp_op;
dst->xattr.cmp_mode = src->xattr.cmp_mode;
break;
case CEPH_OSD_OP_CREATE:
case CEPH_OSD_OP_DELETE:
break;
case CEPH_OSD_OP_COPY_FROM2:
dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
dst->copy_from.src_version =
cpu_to_le64(src->copy_from.src_version);
dst->copy_from.flags = src->copy_from.flags;
dst->copy_from.src_fadvise_flags =
cpu_to_le32(src->copy_from.src_fadvise_flags);
break;
case CEPH_OSD_OP_ASSERT_VER:
dst->assert_ver.unused = cpu_to_le64(0);
dst->assert_ver.ver = cpu_to_le64(src->assert_ver.ver);
break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
WARN_ON(1);
return 0;
}
dst->op = cpu_to_le16(src->op);
dst->flags = cpu_to_le32(src->flags);
dst->payload_len = cpu_to_le32(src->indata_len);
return src->indata_len;
}
/*
* build new request AND message, calculate layout, and adjust file
* extent as needed.
*
* if the file was recently truncated, we include information about its
* old and new size so that the object can be updated appropriately. (we
* avoid synchronously deleting truncated objects because it's slow.)
*/
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
struct ceph_vino vino,
u64 off, u64 *plen,
unsigned int which, int num_ops,
int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq,
u64 truncate_size,
bool use_mempool)
{
struct ceph_osd_request *req;
u64 objnum = 0;
u64 objoff = 0;
u64 objlen = 0;
int r;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE &&
opcode != CEPH_OSD_OP_SPARSE_READ);
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
if (!req) {
r = -ENOMEM;
goto fail;
}
/* calculate max write size */
r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
if (r)
goto fail;
if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
osd_req_op_init(req, which, opcode, 0);
} else {
u32 object_size = layout->object_size;
u32 object_base = off - objoff;
if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
if (truncate_size <= object_base) {
truncate_size = 0;
} else {
truncate_size -= object_base;
if (truncate_size > object_size)
truncate_size = object_size;
}
}
osd_req_op_extent_init(req, which, opcode, objoff, objlen,
truncate_size, truncate_seq);
}
req->r_base_oloc.pool = layout->pool_id;
req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
req->r_flags = flags | osdc->client->options->read_from_replica;
req->r_snapid = vino.snap;
if (flags & CEPH_OSD_FLAG_WRITE)
req->r_data_offset = off;
if (num_ops > 1) {
int num_req_ops, num_rep_ops;
/*
* If this is a multi-op write request, assume that we'll need
* request ops. If it's a multi-op read then assume we'll need
* reply ops. Anything else and call it -EINVAL.
*/
if (flags & CEPH_OSD_FLAG_WRITE) {
num_req_ops = num_ops;
num_rep_ops = 0;
} else if (flags & CEPH_OSD_FLAG_READ) {
num_req_ops = 0;
num_rep_ops = num_ops;
} else {
r = -EINVAL;
goto fail;
}
r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_req_ops,
num_rep_ops);
} else {
r = ceph_osdc_alloc_messages(req, GFP_NOFS);
}
if (r)
goto fail;
return req;
fail:
ceph_osdc_put_request(req);
return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
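/*
 * Minimal usage sketch for ceph_osdc_new_request() (hypothetical
 * caller, error handling elided; i_truncate_seq/i_truncate_size stand
 * in for whatever truncation state the caller tracks):
 *
 *	u64 len = ...;
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, i_truncate_seq, i_truncate_size,
 *				    false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 *
 * The request is then typically started with ceph_osdc_start_request()
 * and waited on with ceph_osdc_wait_request().  Note that *plen may
 * come back smaller than requested if the range crosses an object
 * boundary, so callers should re-check it.
 */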
int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
op->extent.sparse_ext_cnt = cnt;
op->extent.sparse_ext = kmalloc_array(cnt,
sizeof(*op->extent.sparse_ext),
GFP_NOFS);
if (!op->extent.sparse_ext)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(__ceph_alloc_sparse_ext_map);
/*
* We keep osd requests in an rbtree, sorted by ->r_tid.
*/
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
/*
* Call @fn on each OSD request as long as @fn returns 0.
*/
static void for_each_request(struct ceph_osd_client *osdc,
int (*fn)(struct ceph_osd_request *req, void *arg),
void *arg)
{
struct rb_node *n, *p;
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
for (p = rb_first(&osd->o_requests); p; ) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p);
if (fn(req, arg))
return;
}
}
for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p);
if (fn(req, arg))
return;
}
}
static bool osd_homeless(struct ceph_osd *osd)
{
return osd->o_osd == CEPH_HOMELESS_OSD;
}
static bool osd_registered(struct ceph_osd *osd)
{
verify_osdc_locked(osd->o_osdc);
return !RB_EMPTY_NODE(&osd->o_node);
}
/*
* Assumes @osd is zero-initialized.
*/
static void osd_init(struct ceph_osd *osd)
{
refcount_set(&osd->o_ref, 1);
RB_CLEAR_NODE(&osd->o_node);
spin_lock_init(&osd->o_requests_lock);
osd->o_requests = RB_ROOT;
osd->o_linger_requests = RB_ROOT;
osd->o_backoff_mappings = RB_ROOT;
osd->o_backoffs_by_id = RB_ROOT;
INIT_LIST_HEAD(&osd->o_osd_lru);
INIT_LIST_HEAD(&osd->o_keepalive_item);
osd->o_incarnation = 1;
mutex_init(&osd->lock);
}
static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
{
kfree(sr->sr_extent);
memset(sr, '\0', sizeof(*sr));
sr->sr_state = CEPH_SPARSE_READ_HDR;
}
static void osd_cleanup(struct ceph_osd *osd)
{
WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
WARN_ON(!list_empty(&osd->o_osd_lru));
WARN_ON(!list_empty(&osd->o_keepalive_item));
ceph_init_sparse_read(&osd->o_sparse_read);
if (osd->o_auth.authorizer) {
WARN_ON(osd_homeless(osd));
ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
}
}
/*
* Track open sessions with osds.
*/
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
struct ceph_osd *osd;
WARN_ON(onum == CEPH_HOMELESS_OSD);
osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
osd_init(osd);
osd->o_osdc = osdc;
osd->o_osd = onum;
osd->o_sparse_op_idx = -1;
ceph_init_sparse_read(&osd->o_sparse_read);
ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
if (refcount_inc_not_zero(&osd->o_ref)) {
dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
refcount_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
return NULL;
}
}
static void put_osd(struct ceph_osd *osd)
{
dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
refcount_read(&osd->o_ref) - 1);
if (refcount_dec_and_test(&osd->o_ref)) {
osd_cleanup(osd);
kfree(osd);
}
}
DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
static void __move_osd_to_lru(struct ceph_osd *osd)
{
struct ceph_osd_client *osdc = osd->o_osdc;
dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
BUG_ON(!list_empty(&osd->o_osd_lru));
spin_lock(&osdc->osd_lru_lock);
list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
spin_unlock(&osdc->osd_lru_lock);
osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}
static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
if (RB_EMPTY_ROOT(&osd->o_requests) &&
RB_EMPTY_ROOT(&osd->o_linger_requests))
__move_osd_to_lru(osd);
}
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
struct ceph_osd_client *osdc = osd->o_osdc;
dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
spin_lock(&osdc->osd_lru_lock);
if (!list_empty(&osd->o_osd_lru))
list_del_init(&osd->o_osd_lru);
spin_unlock(&osdc->osd_lru_lock);
}
/*
* Close the connection and assign any leftover requests to the
* homeless session.
*/
static void close_osd(struct ceph_osd *osd)
{
struct ceph_osd_client *osdc = osd->o_osdc;
struct rb_node *n;
verify_osdc_wrlocked(osdc);
dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
ceph_con_close(&osd->o_con);
for (n = rb_first(&osd->o_requests); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
n = rb_next(n); /* unlink_request() */
dout(" reassigning req %p tid %llu\n", req, req->r_tid);
unlink_request(osd, req);
link_request(&osdc->homeless_osd, req);
}
for (n = rb_first(&osd->o_linger_requests); n; ) {
struct ceph_osd_linger_request *lreq =
rb_entry(n, struct ceph_osd_linger_request, node);
n = rb_next(n); /* unlink_linger() */
dout(" reassigning lreq %p linger_id %llu\n", lreq,
lreq->linger_id);
unlink_linger(osd, lreq);
link_linger(&osdc->homeless_osd, lreq);
}
clear_backoffs(osd);
__remove_osd_from_lru(osd);
erase_osd(&osdc->osds, osd);
put_osd(osd);
}
/*
* reset osd connect
*/
static int reopen_osd(struct ceph_osd *osd)
{
struct ceph_entity_addr *peer_addr;
dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
if (RB_EMPTY_ROOT(&osd->o_requests) &&
RB_EMPTY_ROOT(&osd->o_linger_requests)) {
close_osd(osd);
return -ENODEV;
}
peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
!ceph_con_opened(&osd->o_con)) {
struct rb_node *n;
dout("osd addr hasn't changed and connection never opened, "
"letting msgr retry\n");
/* touch each r_stamp for handle_timeout()'s benefit */
	/* touch each r_stamp for handle_timeout()'s benefit */
for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
req->r_stamp = jiffies;
}
return -EAGAIN;
}
ceph_con_close(&osd->o_con);
ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
osd->o_incarnation++;
return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
bool wrlocked)
{
struct ceph_osd *osd;
if (wrlocked)
verify_osdc_wrlocked(osdc);
else
verify_osdc_locked(osdc);
if (o != CEPH_HOMELESS_OSD)
osd = lookup_osd(&osdc->osds, o);
else
osd = &osdc->homeless_osd;
if (!osd) {
if (!wrlocked)
return ERR_PTR(-EAGAIN);
osd = create_osd(osdc, o);
insert_osd(&osdc->osds, osd);
ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
&osdc->osdmap->osd_addr[osd->o_osd]);
}
dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
return osd;
}
/*
* Create request <-> OSD session relation.
*
* @req has to be assigned a tid, @osd may be homeless.
*/
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
verify_osd_locked(osd);
WARN_ON(!req->r_tid || req->r_osd);
dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
req, req->r_tid);
if (!osd_homeless(osd))
__remove_osd_from_lru(osd);
else
atomic_inc(&osd->o_osdc->num_homeless);
get_osd(osd);
spin_lock(&osd->o_requests_lock);
insert_request(&osd->o_requests, req);
spin_unlock(&osd->o_requests_lock);
req->r_osd = osd;
}
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
verify_osd_locked(osd);
WARN_ON(req->r_osd != osd);
dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
req, req->r_tid);
req->r_osd = NULL;
spin_lock(&osd->o_requests_lock);
erase_request(&osd->o_requests, req);
spin_unlock(&osd->o_requests_lock);
put_osd(osd);
if (!osd_homeless(osd))
maybe_move_osd_to_lru(osd);
else
atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
return pi->flags & CEPH_POOL_FLAG_FULL;
}
static bool have_pool_full(struct ceph_osd_client *osdc)
{
struct rb_node *n;
for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
if (__pool_full(pi))
return true;
}
return false;
}
static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
struct ceph_pg_pool_info *pi;
pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
if (!pi)
return false;
return __pool_full(pi);
}
/*
* Returns whether a request should be blocked from being sent
* based on the current osdmap and osd_client settings.
*/
static bool target_should_be_paused(struct ceph_osd_client *osdc,
const struct ceph_osd_request_target *t,
struct ceph_pg_pool_info *pi)
{
bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
__pool_full(pi);
WARN_ON(pi->id != t->target_oloc.pool);
return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
(osdc->osdmap->epoch < osdc->epoch_barrier);
}
static int pick_random_replica(const struct ceph_osds *acting)
{
int i = get_random_u32_below(acting->size);
dout("%s picked osd%d, primary osd%d\n", __func__,
acting->osds[i], acting->primary);
return i;
}
/*
* Picks the closest replica based on client's location given by
* crush_location option. Prefers the primary if the locality is
* the same.
*/
static int pick_closest_replica(struct ceph_osd_client *osdc,
const struct ceph_osds *acting)
{
struct ceph_options *opt = osdc->client->options;
int best_i, best_locality;
int i = 0, locality;
do {
locality = ceph_get_crush_locality(osdc->osdmap,
acting->osds[i],
&opt->crush_locs);
if (i == 0 ||
(locality >= 0 && best_locality < 0) ||
(locality >= 0 && best_locality >= 0 &&
locality < best_locality)) {
best_i = i;
best_locality = locality;
}
} while (++i < acting->size);
dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
acting->osds[best_i], best_locality, acting->primary);
return best_i;
}
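/*
 * Map a request target onto the current osdmap: apply cache tiering,
 * compute the PG and acting set, and pick the OSD to send to (the
 * primary, or a replica for balanced/localized reads).  The result
 * says whether the request must be (re)sent, needs no action, or is
 * targeting a pool that no longer exists.
 */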
enum calc_target_result {
CALC_TARGET_NO_ACTION = 0,
CALC_TARGET_NEED_RESEND,
CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
struct ceph_osd_request_target *t,
bool any_change)
{
struct ceph_pg_pool_info *pi;
struct ceph_pg pgid, last_pgid;
struct ceph_osds up, acting;
bool is_read = t->flags & CEPH_OSD_FLAG_READ;
bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
bool force_resend = false;
bool unpaused = false;
bool legacy_change = false;
bool split = false;
bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
bool recovery_deletes = ceph_osdmap_flag(osdc,
CEPH_OSDMAP_RECOVERY_DELETES);
enum calc_target_result ct_res;
t->epoch = osdc->osdmap->epoch;
pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
if (!pi) {
t->osd = CEPH_HOMELESS_OSD;
ct_res = CALC_TARGET_POOL_DNE;
goto out;
}
if (osdc->osdmap->epoch == pi->last_force_request_resend) {
if (t->last_force_resend < pi->last_force_request_resend) {
t->last_force_resend = pi->last_force_request_resend;
force_resend = true;
} else if (t->last_force_resend == 0) {
force_resend = true;
}
}
/* apply tiering */
ceph_oid_copy(&t->target_oid, &t->base_oid);
ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
if (is_read && pi->read_tier >= 0)
t->target_oloc.pool = pi->read_tier;
if (is_write && pi->write_tier >= 0)
t->target_oloc.pool = pi->write_tier;
pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
if (!pi) {
t->osd = CEPH_HOMELESS_OSD;
ct_res = CALC_TARGET_POOL_DNE;
goto out;
}
}
__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
last_pgid.pool = pgid.pool;
last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
if (any_change &&
ceph_is_new_interval(&t->acting,
&acting,
&t->up,
&up,
t->size,
pi->size,
t->min_size,
pi->min_size,
t->pg_num,
pi->pg_num,
t->sort_bitwise,
sort_bitwise,
t->recovery_deletes,
recovery_deletes,
&last_pgid))
force_resend = true;
if (t->paused && !target_should_be_paused(osdc, t, pi)) {
t->paused = false;
unpaused = true;
}
legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
ceph_osds_changed(&t->acting, &acting,
t->used_replica || any_change);
if (t->pg_num)
split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
if (legacy_change || force_resend || split) {
t->pgid = pgid; /* struct */
ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
ceph_osds_copy(&t->acting, &acting);
ceph_osds_copy(&t->up, &up);
t->size = pi->size;
t->min_size = pi->min_size;
t->pg_num = pi->pg_num;
t->pg_num_mask = pi->pg_num_mask;
t->sort_bitwise = sort_bitwise;
t->recovery_deletes = recovery_deletes;
if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
CEPH_OSD_FLAG_LOCALIZE_READS)) &&
!is_write && pi->type == CEPH_POOL_TYPE_REP &&
acting.size > 1) {
int pos;
WARN_ON(!is_read || acting.osds[0] != acting.primary);
if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
pos = pick_random_replica(&acting);
} else {
pos = pick_closest_replica(osdc, &acting);
}
t->osd = acting.osds[pos];
t->used_replica = pos > 0;
} else {
t->osd = acting.primary;
t->used_replica = false;
}
}
if (unpaused || legacy_change || force_resend || split)
ct_res = CALC_TARGET_NEED_RESEND;
else
ct_res = CALC_TARGET_NO_ACTION;
out:
dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
legacy_change, force_resend, split, ct_res, t->osd);
return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
struct ceph_spg_mapping *spg;
spg = kmalloc(sizeof(*spg), GFP_NOIO);
if (!spg)
return NULL;
RB_CLEAR_NODE(&spg->node);
spg->backoffs = RB_ROOT;
return spg;
}
static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
WARN_ON(!RB_EMPTY_NODE(&spg->node));
WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
kfree(spg);
}
/*
* rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
* ceph_pg_mapping. Used to track OSD backoffs -- a backoff [range] is
* defined only within a specific spgid; it does not pass anything to
* children on split, or to another primary.
*/
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
RB_BYPTR, const struct ceph_spg *, node)
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}
static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
void **pkey, size_t *pkey_len)
{
if (hoid->key_len) {
*pkey = hoid->key;
*pkey_len = hoid->key_len;
} else {
*pkey = hoid->oid;
*pkey_len = hoid->oid_len;
}
}
static int compare_names(const void *name1, size_t name1_len,
const void *name2, size_t name2_len)
{
int ret;
ret = memcmp(name1, name2, min(name1_len, name2_len));
if (!ret) {
if (name1_len < name2_len)
ret = -1;
else if (name1_len > name2_len)
ret = 1;
}
return ret;
}
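/*
 * Total order on hobjects matching the OSD's bitwise sort:
 * is_max, pool, reversed hash, namespace, effective key (locator key
 * if set, otherwise the oid), oid, snapid.  Used to look up backoff
 * [begin, end) ranges.
 */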
static int hoid_compare(const struct ceph_hobject_id *lhs,
const struct ceph_hobject_id *rhs)
{
void *effective_key1, *effective_key2;
size_t effective_key1_len, effective_key2_len;
int ret;
if (lhs->is_max < rhs->is_max)
return -1;
if (lhs->is_max > rhs->is_max)
return 1;
if (lhs->pool < rhs->pool)
return -1;
if (lhs->pool > rhs->pool)
return 1;
if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
return -1;
if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
return 1;
ret = compare_names(lhs->nspace, lhs->nspace_len,
rhs->nspace, rhs->nspace_len);
if (ret)
return ret;
hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
ret = compare_names(effective_key1, effective_key1_len,
effective_key2, effective_key2_len);
if (ret)
return ret;
ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
if (ret)
return ret;
if (lhs->snapid < rhs->snapid)
return -1;
if (lhs->snapid > rhs->snapid)
return 1;
return 0;
}
/*
* For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
* compat stuff here.
*
* Assumes @hoid is zero-initialized.
*/
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
u8 struct_v;
u32 struct_len;
int ret;
ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
&struct_len);
if (ret)
return ret;
if (struct_v < 4) {
pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
goto e_inval;
}
hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
GFP_NOIO);
if (IS_ERR(hoid->key)) {
ret = PTR_ERR(hoid->key);
hoid->key = NULL;
return ret;
}
hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
GFP_NOIO);
if (IS_ERR(hoid->oid)) {
ret = PTR_ERR(hoid->oid);
hoid->oid = NULL;
return ret;
}
ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
ceph_decode_32_safe(p, end, hoid->hash, e_inval);
ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
GFP_NOIO);
if (IS_ERR(hoid->nspace)) {
ret = PTR_ERR(hoid->nspace);
hoid->nspace = NULL;
return ret;
}
ceph_decode_64_safe(p, end, hoid->pool, e_inval);
ceph_hoid_build_hash_cache(hoid);
return 0;
e_inval:
return -EINVAL;
}
static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}
static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
ceph_encode_string(p, end, hoid->key, hoid->key_len);
ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
ceph_encode_64(p, hoid->snapid);
ceph_encode_32(p, hoid->hash);
ceph_encode_8(p, hoid->is_max);
ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
ceph_encode_64(p, hoid->pool);
}
static void free_hoid(struct ceph_hobject_id *hoid)
{
if (hoid) {
kfree(hoid->key);
kfree(hoid->oid);
kfree(hoid->nspace);
kfree(hoid);
}
}
static struct ceph_osd_backoff *alloc_backoff(void)
{
struct ceph_osd_backoff *backoff;
backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
if (!backoff)
return NULL;
RB_CLEAR_NODE(&backoff->spg_node);
RB_CLEAR_NODE(&backoff->id_node);
return backoff;
}
static void free_backoff(struct ceph_osd_backoff *backoff)
{
WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
free_hoid(backoff->begin);
free_hoid(backoff->end);
kfree(backoff);
}
/*
* Within a specific spgid, backoffs are managed by ->begin hoid.
*/
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
RB_BYVAL, spg_node);
static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
const struct ceph_hobject_id *hoid)
{
struct rb_node *n = root->rb_node;
while (n) {
struct ceph_osd_backoff *cur =
rb_entry(n, struct ceph_osd_backoff, spg_node);
int cmp;
cmp = hoid_compare(hoid, cur->begin);
if (cmp < 0) {
n = n->rb_left;
} else if (cmp > 0) {
if (hoid_compare(hoid, cur->end) < 0)
return cur;
n = n->rb_right;
} else {
return cur;
}
}
return NULL;
}
/*
* Each backoff has a unique id within its OSD session.
*/
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
static void clear_backoffs(struct ceph_osd *osd)
{
while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
struct ceph_spg_mapping *spg =
rb_entry(rb_first(&osd->o_backoff_mappings),
struct ceph_spg_mapping, node);
while (!RB_EMPTY_ROOT(&spg->backoffs)) {
struct ceph_osd_backoff *backoff =
rb_entry(rb_first(&spg->backoffs),
struct ceph_osd_backoff, spg_node);
erase_backoff(&spg->backoffs, backoff);
erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
free_backoff(backoff);
}
erase_spg_mapping(&osd->o_backoff_mappings, spg);
free_spg_mapping(spg);
}
}
/*
* Set up a temporary, non-owning view into @t.
*/
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
const struct ceph_osd_request_target *t)
{
hoid->key = NULL;
hoid->key_len = 0;
hoid->oid = t->target_oid.name;
hoid->oid_len = t->target_oid.name_len;
hoid->snapid = CEPH_NOSNAP;
hoid->hash = t->pgid.seed;
hoid->is_max = false;
if (t->target_oloc.pool_ns) {
hoid->nspace = t->target_oloc.pool_ns->str;
hoid->nspace_len = t->target_oloc.pool_ns->len;
} else {
hoid->nspace = NULL;
hoid->nspace_len = 0;
}
hoid->pool = t->target_oloc.pool;
ceph_hoid_build_hash_cache(hoid);
}
static bool should_plug_request(struct ceph_osd_request *req)
{
struct ceph_osd *osd = req->r_osd;
struct ceph_spg_mapping *spg;
struct ceph_osd_backoff *backoff;
struct ceph_hobject_id hoid;
spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
if (!spg)
return false;
hoid_fill_from_target(&hoid, &req->r_t);
backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
if (!backoff)
return false;
dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
__func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
return true;
}
/*
* Keep get_num_data_items() in sync with this function.
*/
static void setup_request_data(struct ceph_osd_request *req)
{
struct ceph_msg *request_msg = req->r_request;
struct ceph_msg *reply_msg = req->r_reply;
struct ceph_osd_req_op *op;
if (req->r_request->num_data_items || req->r_reply->num_data_items)
return;
WARN_ON(request_msg->data_length || reply_msg->data_length);
for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
switch (op->op) {
/* request */
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
WARN_ON(op->indata_len != op->extent.length);
ceph_osdc_msg_data_add(request_msg,
&op->extent.osd_data);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
WARN_ON(op->indata_len != op->xattr.name_len +
op->xattr.value_len);
ceph_osdc_msg_data_add(request_msg,
&op->xattr.osd_data);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
ceph_osdc_msg_data_add(request_msg,
&op->notify_ack.request_data);
break;
case CEPH_OSD_OP_COPY_FROM2:
ceph_osdc_msg_data_add(request_msg,
&op->copy_from.osd_data);
break;
/* reply */
case CEPH_OSD_OP_STAT:
ceph_osdc_msg_data_add(reply_msg,
&op->raw_data_in);
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SPARSE_READ:
ceph_osdc_msg_data_add(reply_msg,
&op->extent.osd_data);
break;
case CEPH_OSD_OP_LIST_WATCHERS:
ceph_osdc_msg_data_add(reply_msg,
&op->list_watchers.response_data);
break;
/* both */
case CEPH_OSD_OP_CALL:
WARN_ON(op->indata_len != op->cls.class_len +
op->cls.method_len +
op->cls.indata_len);
ceph_osdc_msg_data_add(request_msg,
&op->cls.request_info);
/* optional, can be NONE */
ceph_osdc_msg_data_add(request_msg,
&op->cls.request_data);
/* optional, can be NONE */
ceph_osdc_msg_data_add(reply_msg,
&op->cls.response_data);
break;
case CEPH_OSD_OP_NOTIFY:
ceph_osdc_msg_data_add(request_msg,
&op->notify.request_data);
ceph_osdc_msg_data_add(reply_msg,
&op->notify.response_data);
break;
}
}
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
ceph_encode_8(p, 1);
ceph_encode_64(p, pgid->pool);
ceph_encode_32(p, pgid->seed);
ceph_encode_32(p, -1); /* preferred */
}
static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
encode_pgid(p, &spgid->pgid);
ceph_encode_8(p, spgid->shard);
}
static void encode_oloc(void **p, void *end,
const struct ceph_object_locator *oloc)
{
ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
ceph_encode_64(p, oloc->pool);
ceph_encode_32(p, -1); /* preferred */
ceph_encode_32(p, 0); /* key len */
if (oloc->pool_ns)
ceph_encode_string(p, end, oloc->pool_ns->str,
oloc->pool_ns->len);
else
ceph_encode_32(p, 0);
}
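/*
 * Encode MOSDOp v8 into the front of @msg, stopping just short of the
 * trailing features field.  The front is finalized -- and re-encoded
 * as v4 for pre-luminous OSDs -- in encode_request_finish() once the
 * peer's features are known.
 */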
static void encode_request_partial(struct ceph_osd_request *req,
struct ceph_msg *msg)
{
void *p = msg->front.iov_base;
void *const end = p + msg->front_alloc_len;
u32 data_len = 0;
int i;
if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
/* snapshots aren't writeable */
WARN_ON(req->r_snapid != CEPH_NOSNAP);
} else {
WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
req->r_data_offset || req->r_snapc);
}
setup_request_data(req);
encode_spgid(&p, &req->r_t.spgid); /* actual spg */
ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
ceph_encode_32(&p, req->r_flags);
/* reqid */
ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
memset(p, 0, sizeof(struct ceph_osd_reqid));
p += sizeof(struct ceph_osd_reqid);
/* trace */
memset(p, 0, sizeof(struct ceph_blkin_trace_info));
p += sizeof(struct ceph_blkin_trace_info);
ceph_encode_32(&p, 0); /* client_inc, always 0 */
ceph_encode_timespec64(p, &req->r_mtime);
p += sizeof(struct ceph_timespec);
encode_oloc(&p, end, &req->r_t.target_oloc);
ceph_encode_string(&p, end, req->r_t.target_oid.name,
req->r_t.target_oid.name_len);
/* ops, can imply data */
ceph_encode_16(&p, req->r_num_ops);
for (i = 0; i < req->r_num_ops; i++) {
data_len += osd_req_encode_op(p, &req->r_ops[i]);
p += sizeof(struct ceph_osd_op);
}
ceph_encode_64(&p, req->r_snapid); /* snapid */
if (req->r_snapc) {
ceph_encode_64(&p, req->r_snapc->seq);
ceph_encode_32(&p, req->r_snapc->num_snaps);
for (i = 0; i < req->r_snapc->num_snaps; i++)
ceph_encode_64(&p, req->r_snapc->snaps[i]);
} else {
ceph_encode_64(&p, 0); /* snap_seq */
ceph_encode_32(&p, 0); /* snaps len */
}
ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
BUG_ON(p > end - 8); /* space for features */
msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
/* front_len is finalized in encode_request_finish() */
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
msg->hdr.data_len = cpu_to_le32(data_len);
/*
* The header "data_off" is a hint to the receiver allowing it
* to align received data into its buffers such that there's no
* need to re-copy it before writing it to disk (direct I/O).
*/
msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
void *p = msg->front.iov_base;
void *const partial_end = p + msg->front.iov_len;
void *const end = p + msg->front_alloc_len;
if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
/* luminous OSD -- encode features and be done */
p = partial_end;
ceph_encode_64(&p, msg->con->peer_features);
} else {
struct {
char spgid[CEPH_ENCODING_START_BLK_LEN +
CEPH_PGID_ENCODING_LEN + 1];
__le32 hash;
__le32 epoch;
__le32 flags;
char reqid[CEPH_ENCODING_START_BLK_LEN +
sizeof(struct ceph_osd_reqid)];
char trace[sizeof(struct ceph_blkin_trace_info)];
__le32 client_inc;
struct ceph_timespec mtime;
} __packed head;
struct ceph_pg pgid;
void *oloc, *oid, *tail;
int oloc_len, oid_len, tail_len;
int len;
/*
* Pre-luminous OSD -- reencode v8 into v4 using @head
* as a temporary buffer. Encode the raw PG; the rest
* is just a matter of moving oloc, oid and tail blobs
* around.
*/
memcpy(&head, p, sizeof(head));
p += sizeof(head);
oloc = p;
p += CEPH_ENCODING_START_BLK_LEN;
pgid.pool = ceph_decode_64(&p);
p += 4 + 4; /* preferred, key len */
len = ceph_decode_32(&p);
p += len; /* nspace */
oloc_len = p - oloc;
oid = p;
len = ceph_decode_32(&p);
p += len;
oid_len = p - oid;
tail = p;
tail_len = partial_end - p;
p = msg->front.iov_base;
ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
/* reassert_version */
memset(p, 0, sizeof(struct ceph_eversion));
p += sizeof(struct ceph_eversion);
BUG_ON(p >= oloc);
memmove(p, oloc, oloc_len);
p += oloc_len;
pgid.seed = le32_to_cpu(head.hash);
encode_pgid(&p, &pgid); /* raw pg */
BUG_ON(p >= oid);
memmove(p, oid, oid_len);
p += oid_len;
/* tail -- ops, snapid, snapc, retry_attempt */
BUG_ON(p >= tail);
memmove(p, tail, tail_len);
p += tail_len;
msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
}
BUG_ON(p > end);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
le16_to_cpu(msg->hdr.version));
}
/*
* @req has to be assigned a tid and registered.
*/
static void send_request(struct ceph_osd_request *req)
{
struct ceph_osd *osd = req->r_osd;
verify_osd_locked(osd);
WARN_ON(osd->o_osd != req->r_t.osd);
/* backoff? */
if (should_plug_request(req))
return;
/*
* We may have a previously queued request message hanging
* around. Cancel it to avoid corrupting the msgr.
*/
if (req->r_sent)
ceph_msg_revoke(req->r_request);
req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
if (req->r_attempts)
req->r_flags |= CEPH_OSD_FLAG_RETRY;
else
WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
encode_request_partial(req, req->r_request);
dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
__func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
req->r_attempts);
req->r_t.paused = false;
req->r_stamp = jiffies;
req->r_attempts++;
req->r_sent = osd->o_incarnation;
req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
bool continuous = false;
verify_osdc_locked(osdc);
WARN_ON(!osdc->osdmap->epoch);
if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
dout("%s osdc %p continuous\n", __func__, osdc);
continuous = true;
} else {
dout("%s osdc %p onetime\n", __func__, osdc);
}
if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
osdc->osdmap->epoch + 1, continuous))
ceph_monc_renew_subs(&osdc->client->monc);
}
static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);
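/*
 * Compute the target and either send the request, park it (paused due
 * to osdmap flags, full pool or epoch barrier, or targeted at the
 * homeless OSD) or fail it outright.  Called with the osdc lock held
 * for read; promotes to the write lock if an OSD session or a map
 * check has to be created.
 */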
static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_osd *osd;
enum calc_target_result ct_res;
int err = 0;
bool need_send = false;
bool promoted = false;
WARN_ON(req->r_tid);
dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
again:
ct_res = calc_target(osdc, &req->r_t, false);
if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
goto promote;
osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
if (IS_ERR(osd)) {
WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
goto promote;
}
if (osdc->abort_err) {
dout("req %p abort_err %d\n", req, osdc->abort_err);
err = osdc->abort_err;
} else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
osdc->epoch_barrier);
req->r_t.paused = true;
maybe_request_map(osdc);
} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
dout("req %p pausewr\n", req);
req->r_t.paused = true;
maybe_request_map(osdc);
} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
dout("req %p pauserd\n", req);
req->r_t.paused = true;
maybe_request_map(osdc);
} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
!(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
CEPH_OSD_FLAG_FULL_FORCE)) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
dout("req %p full/pool_full\n", req);
if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
err = -ENOSPC;
} else {
if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
else
pr_warn_ratelimited("pool %lld is full or reached quota\n",
req->r_t.base_oloc.pool);
req->r_t.paused = true;
maybe_request_map(osdc);
}
} else if (!osd_homeless(osd)) {
need_send = true;
} else {
maybe_request_map(osdc);
}
mutex_lock(&osd->lock);
/*
* Assign the tid atomically with send_request() to protect
* multiple writes to the same object from racing with each
* other, resulting in out of order ops on the OSDs.
*/
req->r_tid = atomic64_inc_return(&osdc->last_tid);
link_request(osd, req);
if (need_send)
send_request(req);
else if (err)
complete_request(req, err);
mutex_unlock(&osd->lock);
if (!err && ct_res == CALC_TARGET_POOL_DNE)
send_map_check(req);
if (promoted)
downgrade_write(&osdc->lock);
return;
promote:
up_read(&osdc->lock);
down_write(&osdc->lock);
wrlocked = true;
promoted = true;
goto again;
}
static void account_request(struct ceph_osd_request *req)
{
WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
req->r_flags |= CEPH_OSD_FLAG_ONDISK;
atomic_inc(&req->r_osdc->num_requests);
req->r_start_stamp = jiffies;
req->r_start_latency = ktime_get();
}
static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
ceph_osdc_get_request(req);
account_request(req);
__submit_request(req, wrlocked);
}
static void finish_request(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
req->r_end_latency = ktime_get();
if (req->r_osd) {
ceph_init_sparse_read(&req->r_osd->o_sparse_read);
unlink_request(req->r_osd, req);
}
atomic_dec(&osdc->num_requests);
/*
* If an OSD has failed or returned and a request has been sent
* twice, it's possible to get a reply and end up here while the
* request message is queued for delivery. We will ignore the
* reply, so not a big deal, but better to try and catch it.
*/
ceph_msg_revoke(req->r_request);
ceph_msg_revoke_incoming(req->r_reply);
}
static void __complete_request(struct ceph_osd_request *req)
{
dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
req->r_tid, req->r_callback, req->r_result);
if (req->r_callback)
req->r_callback(req);
complete_all(&req->r_completion);
ceph_osdc_put_request(req);
}
static void complete_request_workfn(struct work_struct *work)
{
struct ceph_osd_request *req =
container_of(work, struct ceph_osd_request, r_complete_work);
__complete_request(req);
}
/*
* This is open-coded in handle_reply().
*/
static void complete_request(struct ceph_osd_request *req, int err)
{
dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
req->r_result = err;
finish_request(req);
INIT_WORK(&req->r_complete_work, complete_request_workfn);
queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_osd_request *lookup_req;
verify_osdc_wrlocked(osdc);
lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
if (!lookup_req)
return;
WARN_ON(lookup_req != req);
erase_request_mc(&osdc->map_checks, req);
ceph_osdc_put_request(req);
}
static void cancel_request(struct ceph_osd_request *req)
{
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
cancel_map_check(req);
finish_request(req);
complete_all(&req->r_completion);
ceph_osdc_put_request(req);
}
static void abort_request(struct ceph_osd_request *req, int err)
{
dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
cancel_map_check(req);
complete_request(req, err);
}
static int abort_fn(struct ceph_osd_request *req, void *arg)
{
int err = *(int *)arg;
abort_request(req, err);
return 0; /* continue iteration */
}
/*
* Abort all in-flight requests with @err and arrange for all future
* requests to be failed immediately.
*/
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
{
dout("%s osdc %p err %d\n", __func__, osdc, err);
down_write(&osdc->lock);
for_each_request(osdc, abort_fn, &err);
osdc->abort_err = err;
up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_abort_requests);
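/*
 * Illustrative sketch (hypothetical caller): a client being forcibly
 * torn down might do
 *
 *	ceph_osdc_abort_requests(osdc, -EIO);
 *
 * and, if it later wants to resume normal operation,
 *
 *	ceph_osdc_clear_abort_err(osdc);
 *
 * While abort_err is set, newly submitted requests are completed
 * immediately with that error in __submit_request().
 */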
void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
{
down_write(&osdc->lock);
osdc->abort_err = 0;
up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
if (likely(eb > osdc->epoch_barrier)) {
dout("updating epoch_barrier from %u to %u\n",
osdc->epoch_barrier, eb);
osdc->epoch_barrier = eb;
/* Request map if we're not at the barrier yet */
if (eb > osdc->osdmap->epoch)
maybe_request_map(osdc);
}
}
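/*
 * Raise the epoch barrier if @eb is newer.  The read lock covers the
 * common no-op case; we upgrade to the write lock only when the
 * barrier actually has to move (and re-check under it).
 */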
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
down_read(&osdc->lock);
if (unlikely(eb > osdc->epoch_barrier)) {
up_read(&osdc->lock);
down_write(&osdc->lock);
update_epoch_barrier(osdc, eb);
up_write(&osdc->lock);
} else {
up_read(&osdc->lock);
}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
/*
* We can end up releasing caps as a result of abort_request().
* In that case, we probably want to ensure that the cap release message
* has an updated epoch barrier in it, so set the epoch barrier prior to
* aborting the first request.
*/
static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
{
struct ceph_osd_client *osdc = req->r_osdc;
bool *victims = arg;
if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
if (!*victims) {
update_epoch_barrier(osdc, osdc->osdmap->epoch);
*victims = true;
}
abort_request(req, -ENOSPC);
}
return 0; /* continue iteration */
}
/*
* Drop all pending requests that are stalled waiting on a full condition to
* clear, and complete them with ENOSPC as the return code. Set the
* osdc->epoch_barrier to the latest map epoch that we've seen if any were
* cancelled.
*/
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
bool victims = false;
if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
for_each_request(osdc, abort_on_full_fn, &victims);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_osdmap *map = osdc->osdmap;
verify_osdc_wrlocked(osdc);
WARN_ON(!map->epoch);
if (req->r_attempts) {
/*
* We sent a request earlier, which means that
* previously the pool existed, and now it does not
* (i.e., it was deleted).
*/
req->r_map_dne_bound = map->epoch;
dout("%s req %p tid %llu pool disappeared\n", __func__, req,
req->r_tid);
} else {
dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
req, req->r_tid, req->r_map_dne_bound, map->epoch);
}
if (req->r_map_dne_bound) {
if (map->epoch >= req->r_map_dne_bound) {
/* we had a new enough map */
pr_info_ratelimited("tid %llu pool does not exist\n",
req->r_tid);
complete_request(req, -ENOENT);
}
} else {
send_map_check(req);
}
}
static void map_check_cb(struct ceph_mon_generic_request *greq)
{
struct ceph_osd_client *osdc = &greq->monc->client->osdc;
struct ceph_osd_request *req;
u64 tid = greq->private_data;
WARN_ON(greq->result || !greq->u.newest);
down_write(&osdc->lock);
req = lookup_request_mc(&osdc->map_checks, tid);
if (!req) {
dout("%s tid %llu dne\n", __func__, tid);
goto out_unlock;
}
dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
if (!req->r_map_dne_bound)
req->r_map_dne_bound = greq->u.newest;
erase_request_mc(&osdc->map_checks, req);
check_pool_dne(req);
ceph_osdc_put_request(req);
out_unlock:
up_write(&osdc->lock);
}
static void send_map_check(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_osd_request *lookup_req;
int ret;
verify_osdc_wrlocked(osdc);
lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
if (lookup_req) {
WARN_ON(lookup_req != req);
return;
}
ceph_osdc_get_request(req);
insert_request_mc(&osdc->map_checks, req);
ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
map_check_cb, req->r_tid);
WARN_ON(ret);
}
/*
* lingering requests, watch/notify v2 infrastructure
*/
static void linger_release(struct kref *kref)
{
struct ceph_osd_linger_request *lreq =
container_of(kref, struct ceph_osd_linger_request, kref);
dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
lreq->reg_req, lreq->ping_req);
WARN_ON(!RB_EMPTY_NODE(&lreq->node));
WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
WARN_ON(!list_empty(&lreq->scan_item));
WARN_ON(!list_empty(&lreq->pending_lworks));
WARN_ON(lreq->osd);
if (lreq->request_pl)
ceph_pagelist_release(lreq->request_pl);
if (lreq->notify_id_pages)
ceph_release_page_vector(lreq->notify_id_pages, 1);
ceph_osdc_put_request(lreq->reg_req);
ceph_osdc_put_request(lreq->ping_req);
target_destroy(&lreq->t);
kfree(lreq);
}
static void linger_put(struct ceph_osd_linger_request *lreq)
{
if (lreq)
kref_put(&lreq->kref, linger_release);
}
static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
kref_get(&lreq->kref);
return lreq;
}
static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
struct ceph_osd_linger_request *lreq;
lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
if (!lreq)
return NULL;
kref_init(&lreq->kref);
mutex_init(&lreq->lock);
RB_CLEAR_NODE(&lreq->node);
RB_CLEAR_NODE(&lreq->osdc_node);
RB_CLEAR_NODE(&lreq->mc_node);
INIT_LIST_HEAD(&lreq->scan_item);
INIT_LIST_HEAD(&lreq->pending_lworks);
init_completion(&lreq->reg_commit_wait);
init_completion(&lreq->notify_finish_wait);
lreq->osdc = osdc;
target_init(&lreq->t);
dout("%s lreq %p\n", __func__, lreq);
return lreq;
}
DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
/*
* Create linger request <-> OSD session relation.
*
* @lreq has to be registered, @osd may be homeless.
*/
static void link_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq)
{
verify_osd_locked(osd);
WARN_ON(!lreq->linger_id || lreq->osd);
dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
osd->o_osd, lreq, lreq->linger_id);
if (!osd_homeless(osd))
__remove_osd_from_lru(osd);
else
atomic_inc(&osd->o_osdc->num_homeless);
get_osd(osd);
insert_linger(&osd->o_linger_requests, lreq);
lreq->osd = osd;
}
static void unlink_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq)
{
verify_osd_locked(osd);
WARN_ON(lreq->osd != osd);
dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
osd->o_osd, lreq, lreq->linger_id);
lreq->osd = NULL;
erase_linger(&osd->o_linger_requests, lreq);
put_osd(osd);
if (!osd_homeless(osd))
maybe_move_osd_to_lru(osd);
else
atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
verify_osdc_locked(lreq->osdc);
return !RB_EMPTY_NODE(&lreq->osdc_node);
}
static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
bool registered;
down_read(&osdc->lock);
registered = __linger_registered(lreq);
up_read(&osdc->lock);
return registered;
}
static void linger_register(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
verify_osdc_wrlocked(osdc);
WARN_ON(lreq->linger_id);
linger_get(lreq);
lreq->linger_id = ++osdc->last_linger_id;
insert_linger_osdc(&osdc->linger_requests, lreq);
}
static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
verify_osdc_wrlocked(osdc);
erase_linger_osdc(&osdc->linger_requests, lreq);
linger_put(lreq);
}
static void cancel_linger_request(struct ceph_osd_request *req)
{
struct ceph_osd_linger_request *lreq = req->r_priv;
WARN_ON(!req->r_linger);
cancel_request(req);
linger_put(lreq);
}
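/*
 * Deferred delivery of a watch notification or watch error to the
 * user's callback, run from osdc->notify_wq so the callback never
 * executes in message dispatch context.
 */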
struct linger_work {
struct work_struct work;
struct ceph_osd_linger_request *lreq;
struct list_head pending_item;
unsigned long queued_stamp;
union {
struct {
u64 notify_id;
u64 notifier_id;
void *payload; /* points into @msg front */
size_t payload_len;
struct ceph_msg *msg; /* for ceph_msg_put() */
} notify;
struct {
int err;
} error;
};
};
static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
work_func_t workfn)
{
struct linger_work *lwork;
lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
if (!lwork)
return NULL;
INIT_WORK(&lwork->work, workfn);
INIT_LIST_HEAD(&lwork->pending_item);
lwork->lreq = linger_get(lreq);
return lwork;
}
static void lwork_free(struct linger_work *lwork)
{
struct ceph_osd_linger_request *lreq = lwork->lreq;
mutex_lock(&lreq->lock);
list_del(&lwork->pending_item);
mutex_unlock(&lreq->lock);
linger_put(lreq);
kfree(lwork);
}
static void lwork_queue(struct linger_work *lwork)
{
struct ceph_osd_linger_request *lreq = lwork->lreq;
struct ceph_osd_client *osdc = lreq->osdc;
verify_lreq_locked(lreq);
WARN_ON(!list_empty(&lwork->pending_item));
lwork->queued_stamp = jiffies;
list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
queue_work(osdc->notify_wq, &lwork->work);
}
static void do_watch_notify(struct work_struct *w)
{
struct linger_work *lwork = container_of(w, struct linger_work, work);
struct ceph_osd_linger_request *lreq = lwork->lreq;
if (!linger_registered(lreq)) {
dout("%s lreq %p not registered\n", __func__, lreq);
goto out;
}
WARN_ON(!lreq->is_watch);
dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
__func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
lwork->notify.payload_len);
lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
lwork->notify.notifier_id, lwork->notify.payload,
lwork->notify.payload_len);
out:
ceph_msg_put(lwork->notify.msg);
lwork_free(lwork);
}
static void do_watch_error(struct work_struct *w)
{
struct linger_work *lwork = container_of(w, struct linger_work, work);
struct ceph_osd_linger_request *lreq = lwork->lreq;
if (!linger_registered(lreq)) {
dout("%s lreq %p not registered\n", __func__, lreq);
goto out;
}
dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
out:
lwork_free(lwork);
}
static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
struct linger_work *lwork;
lwork = lwork_alloc(lreq, do_watch_error);
if (!lwork) {
pr_err("failed to allocate error-lwork\n");
return;
}
lwork->error.err = lreq->last_error;
lwork_queue(lwork);
}
static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
int result)
{
if (!completion_done(&lreq->reg_commit_wait)) {
lreq->reg_commit_error = (result <= 0 ? result : 0);
complete_all(&lreq->reg_commit_wait);
}
}
static void linger_commit_cb(struct ceph_osd_request *req)
{
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->reg_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->reg_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
lreq->linger_id, req->r_result);
linger_reg_commit_complete(lreq, req->r_result);
lreq->committed = true;
if (!lreq->is_watch) {
struct ceph_osd_data *osd_data =
osd_req_op_data(req, 0, notify, response_data);
void *p = page_address(osd_data->pages[0]);
WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
/* make note of the notify_id */
if (req->r_ops[0].outdata_len >= sizeof(u64)) {
lreq->notify_id = ceph_decode_64(&p);
dout("lreq %p notify_id %llu\n", lreq,
lreq->notify_id);
} else {
dout("lreq %p no notify_id\n", lreq);
}
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
static int normalize_watch_error(int err)
{
/*
* Translate ENOENT -> ENOTCONN so that a delete->disconnection
* notification and a failure to reconnect because we raced with
* the delete appear the same to the user.
*/
if (err == -ENOENT)
err = -ENOTCONN;
return err;
}
static void linger_reconnect_cb(struct ceph_osd_request *req)
{
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->reg_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->reg_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
lreq, lreq->linger_id, req->r_result, lreq->last_error);
if (req->r_result < 0) {
if (!lreq->last_error) {
lreq->last_error = normalize_watch_error(req->r_result);
queue_watch_error(lreq);
}
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
static void send_linger(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd_request *req;
int ret;
verify_osdc_wrlocked(osdc);
mutex_lock(&lreq->lock);
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
if (lreq->reg_req) {
if (lreq->reg_req->r_osd)
cancel_linger_request(lreq->reg_req);
ceph_osdc_put_request(lreq->reg_req);
}
req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
BUG_ON(!req);
target_copy(&req->r_t, &lreq->t);
req->r_mtime = lreq->mtime;
if (lreq->is_watch && lreq->committed) {
osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT,
lreq->linger_id, ++lreq->register_gen);
dout("lreq %p reconnect register_gen %u\n", lreq,
req->r_ops[0].watch.gen);
req->r_callback = linger_reconnect_cb;
} else {
if (lreq->is_watch) {
osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH,
lreq->linger_id, 0);
} else {
lreq->notify_id = 0;
refcount_inc(&lreq->request_pl->refcnt);
osd_req_op_notify_init(req, 0, lreq->linger_id,
lreq->request_pl);
ceph_osd_data_pages_init(
osd_req_op_data(req, 0, notify, response_data),
lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
}
dout("lreq %p register\n", lreq);
req->r_callback = linger_commit_cb;
}
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
BUG_ON(ret);
req->r_priv = linger_get(lreq);
req->r_linger = true;
lreq->reg_req = req;
mutex_unlock(&lreq->lock);
submit_request(req, true);
}
static void linger_ping_cb(struct ceph_osd_request *req)
{
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->ping_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->ping_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
__func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
lreq->last_error);
if (lreq->register_gen == req->r_ops[0].watch.gen) {
if (!req->r_result) {
lreq->watch_valid_thru = lreq->ping_sent;
} else if (!lreq->last_error) {
lreq->last_error = normalize_watch_error(req->r_result);
queue_watch_error(lreq);
}
} else {
dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
lreq->register_gen, req->r_ops[0].watch.gen);
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
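/*
 * Ping the OSD serving @lreq to verify that the watch is still
 * connected.  Skipped while reads are paused; any previous ping request
 * is cancelled and replaced.
 */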
static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd_request *req;
int ret;
if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
dout("%s PAUSERD\n", __func__);
return;
}
lreq->ping_sent = jiffies;
dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
__func__, lreq, lreq->linger_id, lreq->ping_sent,
lreq->register_gen);
if (lreq->ping_req) {
if (lreq->ping_req->r_osd)
cancel_linger_request(lreq->ping_req);
ceph_osdc_put_request(lreq->ping_req);
}
req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
BUG_ON(!req);
target_copy(&req->r_t, &lreq->t);
osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id,
lreq->register_gen);
req->r_callback = linger_ping_cb;
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
BUG_ON(ret);
req->r_priv = linger_get(lreq);
req->r_linger = true;
lreq->ping_req = req;
ceph_osdc_get_request(req);
account_request(req);
req->r_tid = atomic64_inc_return(&osdc->last_tid);
link_request(lreq->osd, req);
send_request(req);
}
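/*
 * Register @lreq with the osd client, map it to an OSD and send the
 * initial registration request.
 */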
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd *osd;
down_write(&osdc->lock);
linger_register(lreq);
calc_target(osdc, &lreq->t, false);
osd = lookup_create_osd(osdc, lreq->t.osd, true);
link_linger(osd, lreq);
send_linger(lreq);
up_write(&osdc->lock);
}
static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd_linger_request *lookup_lreq;
verify_osdc_wrlocked(osdc);
lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
lreq->linger_id);
if (!lookup_lreq)
return;
WARN_ON(lookup_lreq != lreq);
erase_linger_mc(&osdc->linger_map_checks, lreq);
linger_put(lreq);
}
/*
* @lreq has to be both registered and linked.
*/
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
if (lreq->ping_req && lreq->ping_req->r_osd)
cancel_linger_request(lreq->ping_req);
if (lreq->reg_req && lreq->reg_req->r_osd)
cancel_linger_request(lreq->reg_req);
cancel_linger_map_check(lreq);
unlink_linger(lreq->osd, lreq);
linger_unregister(lreq);
}
static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
down_write(&osdc->lock);
if (__linger_registered(lreq))
__linger_cancel(lreq);
up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
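/*
 * Handle a linger request whose target pool may no longer exist.  If
 * the osdmap is new enough to prove the pool is gone, fail the
 * registration with -ENOENT and cancel @lreq; otherwise ask the
 * monitors for the newest osdmap epoch.
 */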
static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osdmap *map = osdc->osdmap;
verify_osdc_wrlocked(osdc);
WARN_ON(!map->epoch);
if (lreq->register_gen) {
lreq->map_dne_bound = map->epoch;
dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
lreq, lreq->linger_id);
} else {
dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
__func__, lreq, lreq->linger_id, lreq->map_dne_bound,
map->epoch);
}
if (lreq->map_dne_bound) {
if (map->epoch >= lreq->map_dne_bound) {
/* we had a new enough map */
pr_info("linger_id %llu pool does not exist\n",
lreq->linger_id);
linger_reg_commit_complete(lreq, -ENOENT);
__linger_cancel(lreq);
}
} else {
send_linger_map_check(lreq);
}
}
static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
struct ceph_osd_client *osdc = &greq->monc->client->osdc;
struct ceph_osd_linger_request *lreq;
u64 linger_id = greq->private_data;
WARN_ON(greq->result || !greq->u.newest);
down_write(&osdc->lock);
lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
if (!lreq) {
dout("%s linger_id %llu dne\n", __func__, linger_id);
goto out_unlock;
}
dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
__func__, lreq, lreq->linger_id, lreq->map_dne_bound,
greq->u.newest);
if (!lreq->map_dne_bound)
lreq->map_dne_bound = greq->u.newest;
erase_linger_mc(&osdc->linger_map_checks, lreq);
check_linger_pool_dne(lreq);
linger_put(lreq);
out_unlock:
up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd_linger_request *lookup_lreq;
int ret;
verify_osdc_wrlocked(osdc);
lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
lreq->linger_id);
if (lookup_lreq) {
WARN_ON(lookup_lreq != lreq);
return;
}
linger_get(lreq);
insert_linger_mc(&osdc->linger_map_checks, lreq);
ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
linger_map_check_cb, lreq->linger_id);
WARN_ON(ret);
}
static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
int ret;
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
ret = wait_for_completion_killable(&lreq->reg_commit_wait);
return ret ?: lreq->reg_commit_error;
}
static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
unsigned long timeout)
{
long left;
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
ceph_timeout_jiffies(timeout));
if (left <= 0)
left = left ?: -ETIMEDOUT;
else
left = lreq->notify_finish_error; /* completed */
return left;
}
/*
 * Timeout callback, called every N seconds.  When one or more OSD
 * requests have been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to the OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client, timeout_work.work);
struct ceph_options *opts = osdc->client->options;
unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
LIST_HEAD(slow_osds);
struct rb_node *n, *p;
dout("%s osdc %p\n", __func__, osdc);
down_write(&osdc->lock);
/*
* ping osds that are a bit slow. this ensures that if there
* is a break in the TCP connection we will notice, and reopen
* a connection with that osd (from the fault callback).
*/
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
bool found = false;
for (p = rb_first(&osd->o_requests); p; ) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p); /* abort_request() */
if (time_before(req->r_stamp, cutoff)) {
dout(" req %p tid %llu on osd%d is laggy\n",
req, req->r_tid, osd->o_osd);
found = true;
}
if (opts->osd_request_timeout &&
time_before(req->r_start_stamp, expiry_cutoff)) {
pr_err_ratelimited("tid %llu on osd%d timeout\n",
req->r_tid, osd->o_osd);
abort_request(req, -ETIMEDOUT);
}
}
for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
struct ceph_osd_linger_request *lreq =
rb_entry(p, struct ceph_osd_linger_request, node);
dout(" lreq %p linger_id %llu is served by osd%d\n",
lreq, lreq->linger_id, osd->o_osd);
found = true;
mutex_lock(&lreq->lock);
if (lreq->is_watch && lreq->committed && !lreq->last_error)
send_linger_ping(lreq);
mutex_unlock(&lreq->lock);
}
if (found)
list_move_tail(&osd->o_keepalive_item, &slow_osds);
}
if (opts->osd_request_timeout) {
for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p); /* abort_request() */
if (time_before(req->r_start_stamp, expiry_cutoff)) {
pr_err_ratelimited("tid %llu on osd%d timeout\n",
req->r_tid, osdc->homeless_osd.o_osd);
abort_request(req, -ETIMEDOUT);
}
}
}
if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
maybe_request_map(osdc);
while (!list_empty(&slow_osds)) {
struct ceph_osd *osd = list_first_entry(&slow_osds,
struct ceph_osd,
o_keepalive_item);
list_del_init(&osd->o_keepalive_item);
ceph_con_keepalive(&osd->o_con);
}
up_write(&osdc->lock);
schedule_delayed_work(&osdc->timeout_work,
osdc->client->options->osd_keepalive_timeout);
}
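/*
 * Periodically close idle OSD sessions whose LRU TTL has expired.
 */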
static void handle_osds_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client,
osds_timeout_work.work);
unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
struct ceph_osd *osd, *nosd;
dout("%s osdc %p\n", __func__, osdc);
down_write(&osdc->lock);
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
if (time_before(jiffies, osd->lru_ttl))
break;
WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
close_osd(osd);
}
up_write(&osdc->lock);
schedule_delayed_work(&osdc->osds_timeout_work,
round_jiffies_relative(delay));
}
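/*
 * Decode a ceph_object_locator from the wire.  Only the pool id is
 * used -- a locator that sets a key, changes the namespace or carries
 * an explicit hash is rejected with -EINVAL.
 */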
static int ceph_oloc_decode(void **p, void *end,
struct ceph_object_locator *oloc)
{
u8 struct_v, struct_cv;
u32 len;
void *struct_end;
int ret = 0;
ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
struct_v = ceph_decode_8(p);
struct_cv = ceph_decode_8(p);
if (struct_v < 3) {
pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
struct_v, struct_cv);
goto e_inval;
}
if (struct_cv > 6) {
pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
struct_v, struct_cv);
goto e_inval;
}
len = ceph_decode_32(p);
ceph_decode_need(p, end, len, e_inval);
struct_end = *p + len;
oloc->pool = ceph_decode_64(p);
*p += 4; /* skip preferred */
len = ceph_decode_32(p);
if (len > 0) {
pr_warn("ceph_object_locator::key is set\n");
goto e_inval;
}
if (struct_v >= 5) {
bool changed = false;
len = ceph_decode_32(p);
if (len > 0) {
ceph_decode_need(p, end, len, e_inval);
if (!oloc->pool_ns ||
ceph_compare_string(oloc->pool_ns, *p, len))
changed = true;
*p += len;
} else {
if (oloc->pool_ns)
changed = true;
}
if (changed) {
/* redirect changes namespace */
pr_warn("ceph_object_locator::nspace is changed\n");
goto e_inval;
}
}
if (struct_v >= 6) {
s64 hash = ceph_decode_64(p);
if (hash != -1) {
pr_warn("ceph_object_locator::hash is set\n");
goto e_inval;
}
}
/* skip the rest */
*p = struct_end;
out:
return ret;
e_inval:
ret = -EINVAL;
goto out;
}
static int ceph_redirect_decode(void **p, void *end,
struct ceph_request_redirect *redir)
{
u8 struct_v, struct_cv;
u32 len;
void *struct_end;
int ret;
ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
struct_v = ceph_decode_8(p);
struct_cv = ceph_decode_8(p);
if (struct_cv > 1) {
pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
struct_v, struct_cv);
goto e_inval;
}
len = ceph_decode_32(p);
ceph_decode_need(p, end, len, e_inval);
struct_end = *p + len;
ret = ceph_oloc_decode(p, end, &redir->oloc);
if (ret)
goto out;
len = ceph_decode_32(p);
if (len > 0) {
pr_warn("ceph_request_redirect::object_name is set\n");
goto e_inval;
}
/* skip the rest */
*p = struct_end;
out:
return ret;
e_inval:
ret = -EINVAL;
goto out;
}
struct MOSDOpReply {
struct ceph_pg pgid;
u64 flags;
int result;
u32 epoch;
int num_ops;
u32 outdata_len[CEPH_OSD_MAX_OPS];
s32 rval[CEPH_OSD_MAX_OPS];
int retry_attempt;
struct ceph_eversion replay_version;
u64 user_version;
struct ceph_request_redirect redirect;
};
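/*
 * Decode an MOSDOpReply message into @m in host byte order: per-op
 * output lengths and return values, the replay/user versions and, for
 * v6+ replies, an optional redirect.
 */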
static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
void *p = msg->front.iov_base;
void *const end = p + msg->front.iov_len;
u16 version = le16_to_cpu(msg->hdr.version);
struct ceph_eversion bad_replay_version;
u8 decode_redir;
u32 len;
int ret;
int i;
ceph_decode_32_safe(&p, end, len, e_inval);
ceph_decode_need(&p, end, len, e_inval);
p += len; /* skip oid */
ret = ceph_decode_pgid(&p, end, &m->pgid);
if (ret)
return ret;
ceph_decode_64_safe(&p, end, m->flags, e_inval);
ceph_decode_32_safe(&p, end, m->result, e_inval);
ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
p += sizeof(bad_replay_version);
ceph_decode_32_safe(&p, end, m->epoch, e_inval);
ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
if (m->num_ops > ARRAY_SIZE(m->outdata_len))
goto e_inval;
ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
e_inval);
for (i = 0; i < m->num_ops; i++) {
struct ceph_osd_op *op = p;
m->outdata_len[i] = le32_to_cpu(op->payload_len);
p += sizeof(*op);
}
ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
for (i = 0; i < m->num_ops; i++)
ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
if (version >= 5) {
ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
memcpy(&m->replay_version, p, sizeof(m->replay_version));
p += sizeof(m->replay_version);
ceph_decode_64_safe(&p, end, m->user_version, e_inval);
} else {
m->replay_version = bad_replay_version; /* struct */
m->user_version = le64_to_cpu(m->replay_version.version);
}
if (version >= 6) {
if (version >= 7)
ceph_decode_8_safe(&p, end, decode_redir, e_inval);
else
decode_redir = 1;
} else {
decode_redir = 0;
}
if (decode_redir) {
ret = ceph_redirect_decode(&p, end, &m->redirect);
if (ret)
return ret;
} else {
ceph_oloc_init(&m->redirect.oloc);
}
return 0;
e_inval:
return -EINVAL;
}
/*
* Handle MOSDOpReply. Set ->r_result and call the callback if it is
* specified.
*/
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
struct ceph_osd_client *osdc = osd->o_osdc;
struct ceph_osd_request *req;
struct MOSDOpReply m;
u64 tid = le64_to_cpu(msg->hdr.tid);
u32 data_len = 0;
int ret;
int i;
dout("%s msg %p tid %llu\n", __func__, msg, tid);
down_read(&osdc->lock);
if (!osd_registered(osd)) {
dout("%s osd%d unknown\n", __func__, osd->o_osd);
goto out_unlock_osdc;
}
WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
mutex_lock(&osd->lock);
req = lookup_request(&osd->o_requests, tid);
if (!req) {
dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
goto out_unlock_session;
}
m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
ret = decode_MOSDOpReply(msg, &m);
m.redirect.oloc.pool_ns = NULL;
if (ret) {
pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
req->r_tid, ret);
ceph_msg_dump(msg);
goto fail_request;
}
dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
__func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
le64_to_cpu(m.replay_version.version), m.user_version);
if (m.retry_attempt >= 0) {
if (m.retry_attempt != req->r_attempts - 1) {
dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
req, req->r_tid, m.retry_attempt,
req->r_attempts - 1);
goto out_unlock_session;
}
} else {
WARN_ON(1); /* MOSDOpReply v4 is assumed */
}
if (!ceph_oloc_empty(&m.redirect.oloc)) {
dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
m.redirect.oloc.pool);
unlink_request(osd, req);
mutex_unlock(&osd->lock);
/*
* Not ceph_oloc_copy() - changing pool_ns is not
* supported.
*/
req->r_t.target_oloc.pool = m.redirect.oloc.pool;
req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
CEPH_OSD_FLAG_IGNORE_OVERLAY |
CEPH_OSD_FLAG_IGNORE_CACHE;
req->r_tid = 0;
__submit_request(req, false);
goto out_unlock_osdc;
}
if (m.result == -EAGAIN) {
dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
unlink_request(osd, req);
mutex_unlock(&osd->lock);
/*
* The object is missing on the replica or not (yet)
* readable. Clear pgid to force a resend to the primary
* via legacy_change.
*/
req->r_t.pgid.pool = 0;
req->r_t.pgid.seed = 0;
WARN_ON(!req->r_t.used_replica);
req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
CEPH_OSD_FLAG_LOCALIZE_READS);
req->r_tid = 0;
__submit_request(req, false);
goto out_unlock_osdc;
}
if (m.num_ops != req->r_num_ops) {
pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
req->r_num_ops, req->r_tid);
goto fail_request;
}
for (i = 0; i < req->r_num_ops; i++) {
dout(" req %p tid %llu op %d rval %d len %u\n", req,
req->r_tid, i, m.rval[i], m.outdata_len[i]);
req->r_ops[i].rval = m.rval[i];
req->r_ops[i].outdata_len = m.outdata_len[i];
data_len += m.outdata_len[i];
}
if (data_len != le32_to_cpu(msg->hdr.data_len)) {
pr_err("sum of lens %u != %u for tid %llu\n", data_len,
le32_to_cpu(msg->hdr.data_len), req->r_tid);
goto fail_request;
}
dout("%s req %p tid %llu result %d data_len %u\n", __func__,
req, req->r_tid, m.result, data_len);
/*
* Since we only ever request ONDISK, we should only ever get
* one (type of) reply back.
*/
WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
req->r_version = m.user_version;
req->r_result = m.result ?: data_len;
finish_request(req);
mutex_unlock(&osd->lock);
up_read(&osdc->lock);
__complete_request(req);
return;
fail_request:
complete_request(req, -EIO);
out_unlock_session:
mutex_unlock(&osd->lock);
out_unlock_osdc:
up_read(&osdc->lock);
}
static void set_pool_was_full(struct ceph_osd_client *osdc)
{
struct rb_node *n;
for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
pi->was_full = __pool_full(pi);
}
}
static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
struct ceph_pg_pool_info *pi;
pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
if (!pi)
return false;
return pi->was_full && !__pool_full(pi);
}
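/*
 * Recompute the target for @lreq against the current osdmap and, if the
 * mapping changed, move it to the new OSD's linger tree.
 */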
static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
enum calc_target_result ct_res;
ct_res = calc_target(osdc, &lreq->t, true);
if (ct_res == CALC_TARGET_NEED_RESEND) {
struct ceph_osd *osd;
osd = lookup_create_osd(osdc, lreq->t.osd, true);
if (osd != lreq->osd) {
unlink_linger(lreq->osd, lreq);
link_linger(osd, lreq);
}
}
return ct_res;
}
/*
* Requeue requests whose mapping to an OSD has changed.
*/
static void scan_requests(struct ceph_osd *osd,
bool force_resend,
bool cleared_full,
bool check_pool_cleared_full,
struct rb_root *need_resend,
struct list_head *need_resend_linger)
{
struct ceph_osd_client *osdc = osd->o_osdc;
struct rb_node *n;
bool force_resend_writes;
for (n = rb_first(&osd->o_linger_requests); n; ) {
struct ceph_osd_linger_request *lreq =
rb_entry(n, struct ceph_osd_linger_request, node);
enum calc_target_result ct_res;
n = rb_next(n); /* recalc_linger_target() */
dout("%s lreq %p linger_id %llu\n", __func__, lreq,
lreq->linger_id);
ct_res = recalc_linger_target(lreq);
switch (ct_res) {
case CALC_TARGET_NO_ACTION:
force_resend_writes = cleared_full ||
(check_pool_cleared_full &&
pool_cleared_full(osdc, lreq->t.base_oloc.pool));
if (!force_resend && !force_resend_writes)
break;
fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_linger_map_check(lreq);
/*
* scan_requests() for the previous epoch(s)
* may have already added it to the list, since
* it's not unlinked here.
*/
if (list_empty(&lreq->scan_item))
list_add_tail(&lreq->scan_item, need_resend_linger);
break;
case CALC_TARGET_POOL_DNE:
list_del_init(&lreq->scan_item);
check_linger_pool_dne(lreq);
break;
}
}
for (n = rb_first(&osd->o_requests); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
enum calc_target_result ct_res;
n = rb_next(n); /* unlink_request(), check_pool_dne() */
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
ct_res = calc_target(osdc, &req->r_t, false);
switch (ct_res) {
case CALC_TARGET_NO_ACTION:
force_resend_writes = cleared_full ||
(check_pool_cleared_full &&
pool_cleared_full(osdc, req->r_t.base_oloc.pool));
if (!force_resend &&
(!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
!force_resend_writes))
break;
fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_map_check(req);
unlink_request(osd, req);
insert_request(need_resend, req);
break;
case CALC_TARGET_POOL_DNE:
check_pool_dne(req);
break;
}
}
}
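/*
 * Apply a single incremental or full osdmap and collect requests and
 * linger requests that need to be resent as a result.  OSD sessions
 * whose peer went down or changed address are closed.
 */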
static int handle_one_map(struct ceph_osd_client *osdc,
void *p, void *end, bool incremental,
struct rb_root *need_resend,
struct list_head *need_resend_linger)
{
struct ceph_osdmap *newmap;
struct rb_node *n;
bool skipped_map = false;
bool was_full;
was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
set_pool_was_full(osdc);
if (incremental)
newmap = osdmap_apply_incremental(&p, end,
ceph_msgr2(osdc->client),
osdc->osdmap);
else
newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client));
if (IS_ERR(newmap))
return PTR_ERR(newmap);
if (newmap != osdc->osdmap) {
/*
* Preserve ->was_full before destroying the old map.
* For pools that weren't in the old map, ->was_full
* should be false.
*/
for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
struct ceph_pg_pool_info *old_pi;
old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
if (old_pi)
pi->was_full = old_pi->was_full;
else
WARN_ON(pi->was_full);
}
if (osdc->osdmap->epoch &&
osdc->osdmap->epoch + 1 < newmap->epoch) {
WARN_ON(incremental);
skipped_map = true;
}
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = newmap;
}
was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
need_resend, need_resend_linger);
for (n = rb_first(&osdc->osds); n; ) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
n = rb_next(n); /* close_osd() */
scan_requests(osd, skipped_map, was_full, true, need_resend,
need_resend_linger);
if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
memcmp(&osd->o_con.peer_addr,
ceph_osd_addr(osdc->osdmap, osd->o_osd),
sizeof(struct ceph_entity_addr)))
close_osd(osd);
}
return 0;
}
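/*
 * Resend the collected requests and linger requests against the new
 * osdmap, recomputing targets that are behind the current epoch and
 * dropping those whose pool no longer exists.
 */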
static void kick_requests(struct ceph_osd_client *osdc,
struct rb_root *need_resend,
struct list_head *need_resend_linger)
{
struct ceph_osd_linger_request *lreq, *nlreq;
enum calc_target_result ct_res;
struct rb_node *n;
/* make sure need_resend targets reflect latest map */
for (n = rb_first(need_resend); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
n = rb_next(n);
if (req->r_t.epoch < osdc->osdmap->epoch) {
ct_res = calc_target(osdc, &req->r_t, false);
if (ct_res == CALC_TARGET_POOL_DNE) {
erase_request(need_resend, req);
check_pool_dne(req);
}
}
}
for (n = rb_first(need_resend); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
struct ceph_osd *osd;
n = rb_next(n);
erase_request(need_resend, req); /* before link_request() */
osd = lookup_create_osd(osdc, req->r_t.osd, true);
link_request(osd, req);
if (!req->r_linger) {
if (!osd_homeless(osd) && !req->r_t.paused)
send_request(req);
} else {
cancel_linger_request(req);
}
}
list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
if (!osd_homeless(lreq->osd))
send_linger(lreq);
list_del_init(&lreq->scan_item);
}
}
/*
* Process updated osd map.
*
* The message contains any number of incremental and full maps, normally
* indicating some sort of topology change in the cluster. Kick requests
* off to different OSDs as needed.
*/
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
void *p = msg->front.iov_base;
void *const end = p + msg->front.iov_len;
u32 nr_maps, maplen;
u32 epoch;
struct ceph_fsid fsid;
struct rb_root need_resend = RB_ROOT;
LIST_HEAD(need_resend_linger);
bool handled_incremental = false;
bool was_pauserd, was_pausewr;
bool pauserd, pausewr;
int err;
dout("%s have %u\n", __func__, osdc->osdmap->epoch);
down_write(&osdc->lock);
/* verify fsid */
ceph_decode_need(&p, end, sizeof(fsid), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid));
if (ceph_check_fsid(osdc->client, &fsid) < 0)
goto bad;
was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
have_pool_full(osdc);
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d inc maps\n", nr_maps);
while (nr_maps > 0) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
ceph_decode_need(&p, end, maplen, bad);
if (osdc->osdmap->epoch &&
osdc->osdmap->epoch + 1 == epoch) {
dout("applying incremental map %u len %d\n",
epoch, maplen);
err = handle_one_map(osdc, p, p + maplen, true,
&need_resend, &need_resend_linger);
if (err)
goto bad;
handled_incremental = true;
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
}
p += maplen;
nr_maps--;
}
if (handled_incremental)
goto done;
/* full maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d full maps\n", nr_maps);
while (nr_maps) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
ceph_decode_need(&p, end, maplen, bad);
if (nr_maps > 1) {
dout("skipping non-latest full map %u len %d\n",
epoch, maplen);
} else if (osdc->osdmap->epoch >= epoch) {
dout("skipping full map %u len %d, "
"older than our %u\n", epoch, maplen,
osdc->osdmap->epoch);
} else {
dout("taking full map %u len %d\n", epoch, maplen);
err = handle_one_map(osdc, p, p + maplen, false,
&need_resend, &need_resend_linger);
if (err)
goto bad;
}
p += maplen;
nr_maps--;
}
done:
	/*
	 * Subscribe to subsequent osdmap updates if we are or were paused
	 * or full, or are behind the epoch barrier, to ensure we find out
	 * when we are no longer full and stop returning ENOSPC.
	 */
pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
have_pool_full(osdc);
if (was_pauserd || was_pausewr || pauserd || pausewr ||
osdc->osdmap->epoch < osdc->epoch_barrier)
maybe_request_map(osdc);
kick_requests(osdc, &need_resend, &need_resend_linger);
ceph_osdc_abort_on_full(osdc);
ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
osdc->osdmap->epoch);
up_write(&osdc->lock);
wake_up_all(&osdc->client->auth_wq);
return;
bad:
pr_err("osdc handle_map corrupt msg\n");
ceph_msg_dump(msg);
up_write(&osdc->lock);
}
/*
* Resubmit requests pending on the given osd.
*/
static void kick_osd_requests(struct ceph_osd *osd)
{
struct rb_node *n;
clear_backoffs(osd);
for (n = rb_first(&osd->o_requests); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
n = rb_next(n); /* cancel_linger_request() */
if (!req->r_linger) {
if (!req->r_t.paused)
send_request(req);
} else {
cancel_linger_request(req);
}
}
for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
struct ceph_osd_linger_request *lreq =
rb_entry(n, struct ceph_osd_linger_request, node);
send_linger(lreq);
}
}
/*
* If the osd connection drops, we need to resubmit all requests.
*/
static void osd_fault(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
down_write(&osdc->lock);
if (!osd_registered(osd)) {
dout("%s osd%d unknown\n", __func__, osd->o_osd);
goto out_unlock;
}
if (!reopen_osd(osd))
kick_osd_requests(osd);
maybe_request_map(osdc);
out_unlock:
up_write(&osdc->lock);
}
struct MOSDBackoff {
struct ceph_spg spgid;
u32 map_epoch;
u8 op;
u64 id;
struct ceph_hobject_id *begin;
struct ceph_hobject_id *end;
};
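/*
 * Decode an MOSDBackoff message.  On success @m holds allocated
 * begin/end hobjects which the caller must free or hand off.
 */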
static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
void *p = msg->front.iov_base;
void *const end = p + msg->front.iov_len;
u8 struct_v;
u32 struct_len;
int ret;
ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
if (ret)
return ret;
ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
if (ret)
return ret;
ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
ceph_decode_8_safe(&p, end, m->op, e_inval);
ceph_decode_64_safe(&p, end, m->id, e_inval);
m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
if (!m->begin)
return -ENOMEM;
ret = decode_hoid(&p, end, m->begin);
if (ret) {
free_hoid(m->begin);
return ret;
}
m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
if (!m->end) {
free_hoid(m->begin);
return -ENOMEM;
}
ret = decode_hoid(&p, end, m->end);
if (ret) {
free_hoid(m->begin);
free_hoid(m->end);
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static struct ceph_msg *create_backoff_message(
const struct ceph_osd_backoff *backoff,
u32 map_epoch)
{
struct ceph_msg *msg;
void *p, *end;
int msg_size;
msg_size = CEPH_ENCODING_START_BLK_LEN +
CEPH_PGID_ENCODING_LEN + 1; /* spgid */
msg_size += 4 + 1 + 8; /* map_epoch, op, id */
msg_size += CEPH_ENCODING_START_BLK_LEN +
hoid_encoding_size(backoff->begin);
msg_size += CEPH_ENCODING_START_BLK_LEN +
hoid_encoding_size(backoff->end);
msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
if (!msg)
return NULL;
p = msg->front.iov_base;
end = p + msg->front_alloc_len;
encode_spgid(&p, &backoff->spgid);
ceph_encode_32(&p, map_epoch);
ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
ceph_encode_64(&p, backoff->id);
encode_hoid(&p, end, backoff->begin);
encode_hoid(&p, end, backoff->end);
BUG_ON(p != end);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
return msg;
}
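/*
 * Install a backoff for the given PG and object range and ack it back
 * to the OSD with the original backoff's epoch.
 */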
static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
struct ceph_spg_mapping *spg;
struct ceph_osd_backoff *backoff;
struct ceph_msg *msg;
dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
if (!spg) {
spg = alloc_spg_mapping();
if (!spg) {
pr_err("%s failed to allocate spg\n", __func__);
return;
}
spg->spgid = m->spgid; /* struct */
insert_spg_mapping(&osd->o_backoff_mappings, spg);
}
backoff = alloc_backoff();
if (!backoff) {
pr_err("%s failed to allocate backoff\n", __func__);
return;
}
backoff->spgid = m->spgid; /* struct */
backoff->id = m->id;
backoff->begin = m->begin;
m->begin = NULL; /* backoff now owns this */
backoff->end = m->end;
m->end = NULL; /* ditto */
insert_backoff(&spg->backoffs, backoff);
insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
/*
* Ack with original backoff's epoch so that the OSD can
* discard this if there was a PG split.
*/
msg = create_backoff_message(backoff, m->map_epoch);
if (!msg) {
pr_err("%s failed to allocate msg\n", __func__);
return;
}
ceph_con_send(&osd->o_con, msg);
}
static bool target_contained_by(const struct ceph_osd_request_target *t,
const struct ceph_hobject_id *begin,
const struct ceph_hobject_id *end)
{
struct ceph_hobject_id hoid;
int cmp;
hoid_fill_from_target(&hoid, t);
cmp = hoid_compare(&hoid, begin);
return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}
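/*
 * Remove the backoff identified by @m and resend any requests for that
 * PG whose target falls inside the unblocked range.
 */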
static void handle_backoff_unblock(struct ceph_osd *osd,
const struct MOSDBackoff *m)
{
struct ceph_spg_mapping *spg;
struct ceph_osd_backoff *backoff;
struct rb_node *n;
dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
if (!backoff) {
pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
__func__, osd->o_osd, m->spgid.pgid.pool,
m->spgid.pgid.seed, m->spgid.shard, m->id);
return;
}
if (hoid_compare(backoff->begin, m->begin) &&
hoid_compare(backoff->end, m->end)) {
pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
__func__, osd->o_osd, m->spgid.pgid.pool,
m->spgid.pgid.seed, m->spgid.shard, m->id);
/* unblock it anyway... */
}
spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
BUG_ON(!spg);
erase_backoff(&spg->backoffs, backoff);
erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
free_backoff(backoff);
if (RB_EMPTY_ROOT(&spg->backoffs)) {
erase_spg_mapping(&osd->o_backoff_mappings, spg);
free_spg_mapping(spg);
}
for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
/*
* Match against @m, not @backoff -- the PG may
* have split on the OSD.
*/
if (target_contained_by(&req->r_t, m->begin, m->end)) {
/*
* If no other installed backoff applies,
* resend.
*/
send_request(req);
}
}
}
}
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
struct ceph_osd_client *osdc = osd->o_osdc;
struct MOSDBackoff m;
int ret;
down_read(&osdc->lock);
if (!osd_registered(osd)) {
dout("%s osd%d unknown\n", __func__, osd->o_osd);
up_read(&osdc->lock);
return;
}
WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
mutex_lock(&osd->lock);
ret = decode_MOSDBackoff(msg, &m);
if (ret) {
pr_err("failed to decode MOSDBackoff: %d\n", ret);
ceph_msg_dump(msg);
goto out_unlock;
}
switch (m.op) {
case CEPH_OSD_BACKOFF_OP_BLOCK:
handle_backoff_block(osd, &m);
break;
case CEPH_OSD_BACKOFF_OP_UNBLOCK:
handle_backoff_unblock(osd, &m);
break;
default:
pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
}
free_hoid(m.begin);
free_hoid(m.end);
out_unlock:
mutex_unlock(&osd->lock);
up_read(&osdc->lock);
}
/*
* Process osd watch notifications
*/
static void handle_watch_notify(struct ceph_osd_client *osdc,
struct ceph_msg *msg)
{
void *p = msg->front.iov_base;
void *const end = p + msg->front.iov_len;
struct ceph_osd_linger_request *lreq;
struct linger_work *lwork;
u8 proto_ver, opcode;
u64 cookie, notify_id;
u64 notifier_id = 0;
s32 return_code = 0;
void *payload = NULL;
u32 payload_len = 0;
ceph_decode_8_safe(&p, end, proto_ver, bad);
ceph_decode_8_safe(&p, end, opcode, bad);
ceph_decode_64_safe(&p, end, cookie, bad);
p += 8; /* skip ver */
ceph_decode_64_safe(&p, end, notify_id, bad);
if (proto_ver >= 1) {
ceph_decode_32_safe(&p, end, payload_len, bad);
ceph_decode_need(&p, end, payload_len, bad);
payload = p;
p += payload_len;
}
if (le16_to_cpu(msg->hdr.version) >= 2)
ceph_decode_32_safe(&p, end, return_code, bad);
if (le16_to_cpu(msg->hdr.version) >= 3)
ceph_decode_64_safe(&p, end, notifier_id, bad);
down_read(&osdc->lock);
lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
if (!lreq) {
dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
cookie);
goto out_unlock_osdc;
}
mutex_lock(&lreq->lock);
dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
opcode, cookie, lreq, lreq->is_watch);
if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
if (!lreq->last_error) {
lreq->last_error = -ENOTCONN;
queue_watch_error(lreq);
}
} else if (!lreq->is_watch) {
/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
if (lreq->notify_id && lreq->notify_id != notify_id) {
dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
lreq->notify_id, notify_id);
} else if (!completion_done(&lreq->notify_finish_wait)) {
struct ceph_msg_data *data =
msg->num_data_items ? &msg->data[0] : NULL;
if (data) {
if (lreq->preply_pages) {
WARN_ON(data->type !=
CEPH_MSG_DATA_PAGES);
*lreq->preply_pages = data->pages;
*lreq->preply_len = data->length;
data->own_pages = false;
}
}
lreq->notify_finish_error = return_code;
complete_all(&lreq->notify_finish_wait);
}
} else {
/* CEPH_WATCH_EVENT_NOTIFY */
lwork = lwork_alloc(lreq, do_watch_notify);
if (!lwork) {
pr_err("failed to allocate notify-lwork\n");
goto out_unlock_lreq;
}
lwork->notify.notify_id = notify_id;
lwork->notify.notifier_id = notifier_id;
lwork->notify.payload = payload;
lwork->notify.payload_len = payload_len;
lwork->notify.msg = ceph_msg_get(msg);
lwork_queue(lwork);
}
out_unlock_lreq:
mutex_unlock(&lreq->lock);
out_unlock_osdc:
up_read(&osdc->lock);
return;
bad:
pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
* Register request, send initial attempt.
*/
void ceph_osdc_start_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
down_read(&osdc->lock);
submit_request(req, false);
up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
* Unregister request. If @req was registered, it isn't completed:
* r_result isn't set and __complete_request() isn't invoked.
*
* If @req wasn't registered, this call may have raced with
* handle_reply(), in which case r_result would already be set and
* __complete_request() would be getting invoked, possibly even
* concurrently with this call.
*/
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
down_write(&osdc->lock);
if (req->r_osd)
cancel_request(req);
up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);
/*
* @timeout: in jiffies, 0 means "wait forever"
*/
static int wait_request_timeout(struct ceph_osd_request *req,
unsigned long timeout)
{
long left;
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
left = wait_for_completion_killable_timeout(&req->r_completion,
ceph_timeout_jiffies(timeout));
if (left <= 0) {
left = left ?: -ETIMEDOUT;
ceph_osdc_cancel_request(req);
} else {
left = req->r_result; /* completed */
}
return left;
}
/*
* wait for a request to complete
*/
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
 * sync - wait for all in-flight write requests to flush.  Starvation is
 * avoided by only waiting for requests with tids at or below the tid
 * snapshot taken on entry.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
struct rb_node *n, *p;
u64 last_tid = atomic64_read(&osdc->last_tid);
again:
down_read(&osdc->lock);
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
mutex_lock(&osd->lock);
for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
if (req->r_tid > last_tid)
break;
if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
continue;
ceph_osdc_get_request(req);
mutex_unlock(&osd->lock);
up_read(&osdc->lock);
dout("%s waiting on req %p tid %llu last_tid %llu\n",
__func__, req, req->r_tid, last_tid);
wait_for_completion(&req->r_completion);
ceph_osdc_put_request(req);
goto again;
}
mutex_unlock(&osd->lock);
}
up_read(&osdc->lock);
dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
/*
* Returns a handle, caller owns a ref.
*/
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
rados_watchcb2_t wcb,
rados_watcherrcb_t errcb,
void *data)
{
struct ceph_osd_linger_request *lreq;
int ret;
lreq = linger_alloc(osdc);
if (!lreq)
return ERR_PTR(-ENOMEM);
lreq->is_watch = true;
lreq->wcb = wcb;
lreq->errcb = errcb;
lreq->data = data;
lreq->watch_valid_thru = jiffies;
ceph_oid_copy(&lreq->t.base_oid, oid);
ceph_oloc_copy(&lreq->t.base_oloc, oloc);
lreq->t.flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&lreq->mtime);
linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (ret) {
linger_cancel(lreq);
goto err_put_lreq;
}
return lreq;
err_put_lreq:
linger_put(lreq);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
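/*
 * Illustrative usage sketch (callback names and error handling are the
 * caller's own):
 *
 *	lreq = ceph_osdc_watch(osdc, &oid, &oloc, my_wcb, my_errcb, priv);
 *	if (IS_ERR(lreq))
 *		return PTR_ERR(lreq);
 *	...
 *	ceph_osdc_unwatch(osdc, lreq);
 */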
/*
* Releases a ref.
*
* Times out after mount_timeout to preserve rbd unmap behaviour
* introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
* with mount_timeout").
*/
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
struct ceph_osd_linger_request *lreq)
{
struct ceph_options *opts = osdc->client->options;
struct ceph_osd_request *req;
int ret;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
if (!req)
return -ENOMEM;
ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req->r_flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&req->r_mtime);
osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH,
lreq->linger_id, 0);
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;
ceph_osdc_start_request(osdc, req);
linger_cancel(lreq);
linger_put(lreq);
ret = wait_request_timeout(req, opts->mount_timeout);
out_put_req:
ceph_osdc_put_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
u64 notify_id, u64 cookie, void *payload,
u32 payload_len)
{
struct ceph_osd_req_op *op;
struct ceph_pagelist *pl;
int ret;
op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
pl = ceph_pagelist_alloc(GFP_NOIO);
if (!pl)
return -ENOMEM;
ret = ceph_pagelist_encode_64(pl, notify_id);
ret |= ceph_pagelist_encode_64(pl, cookie);
if (payload) {
ret |= ceph_pagelist_encode_32(pl, payload_len);
ret |= ceph_pagelist_append(pl, payload, payload_len);
} else {
ret |= ceph_pagelist_encode_32(pl, 0);
}
if (ret) {
ceph_pagelist_release(pl);
return -ENOMEM;
}
ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
op->indata_len = pl->length;
return 0;
}
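/*
 * Send a NOTIFY_ACK for the given notify_id/cookie, optionally carrying
 * a reply payload, and wait for it to complete.
 */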
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
u64 notify_id,
u64 cookie,
void *payload,
u32 payload_len)
{
struct ceph_osd_request *req;
int ret;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
if (!req)
return -ENOMEM;
ceph_oid_copy(&req->r_base_oid, oid);
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
payload_len);
if (ret)
goto out_put_req;
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
out_put_req:
ceph_osdc_put_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
/*
* @timeout: in seconds
*
* @preply_{pages,len} are initialized both on success and error.
* The caller is responsible for:
*
* ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
*/
int ceph_osdc_notify(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *payload,
u32 payload_len,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len)
{
struct ceph_osd_linger_request *lreq;
int ret;
WARN_ON(!timeout);
if (preply_pages) {
*preply_pages = NULL;
*preply_len = 0;
}
lreq = linger_alloc(osdc);
if (!lreq)
return -ENOMEM;
lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO);
if (!lreq->request_pl) {
ret = -ENOMEM;
goto out_put_lreq;
}
ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */
ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout);
ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len);
ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len);
if (ret) {
ret = -ENOMEM;
goto out_put_lreq;
}
/* for notify_id */
lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(lreq->notify_id_pages)) {
ret = PTR_ERR(lreq->notify_id_pages);
lreq->notify_id_pages = NULL;
goto out_put_lreq;
}
lreq->preply_pages = preply_pages;
lreq->preply_len = preply_len;
ceph_oid_copy(&lreq->t.base_oid, oid);
ceph_oloc_copy(&lreq->t.base_oloc, oloc);
lreq->t.flags = CEPH_OSD_FLAG_READ;
linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (!ret)
ret = linger_notify_finish_wait(lreq,
msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
else
dout("lreq %p failed to initiate notify %d\n", lreq, ret);
linger_cancel(lreq);
out_put_lreq:
linger_put(lreq);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
/*
* Return the number of milliseconds since the watch was last
* confirmed, or an error. If there is an error, the watch is no
* longer valid, and should be destroyed with ceph_osdc_unwatch().
*/
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
struct ceph_osd_linger_request *lreq)
{
unsigned long stamp, age;
int ret;
down_read(&osdc->lock);
mutex_lock(&lreq->lock);
stamp = lreq->watch_valid_thru;
if (!list_empty(&lreq->pending_lworks)) {
struct linger_work *lwork =
list_first_entry(&lreq->pending_lworks,
struct linger_work,
pending_item);
if (time_before(lwork->queued_stamp, stamp))
stamp = lwork->queued_stamp;
}
age = jiffies - stamp;
dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
lreq, lreq->linger_id, age, lreq->last_error);
/* we are truncating to msecs, so return a safe upper bound */
ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
mutex_unlock(&lreq->lock);
up_read(&osdc->lock);
return ret;
}
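/*
 * Decode a single watch_item_t entry as returned by the LIST_WATCHERS op.
 */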
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
u8 struct_v;
u32 struct_len;
int ret;
ret = ceph_start_decoding(p, end, 2, "watch_item_t",
&struct_v, &struct_len);
if (ret)
goto bad;
ret = -EINVAL;
ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
ceph_decode_64_safe(p, end, item->cookie, bad);
ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
if (struct_v >= 2) {
ret = ceph_decode_entity_addr(p, end, &item->addr);
if (ret)
goto bad;
} else {
ret = 0;
}
dout("%s %s%llu cookie %llu addr %s\n", __func__,
ENTITY_NAME(item->name), item->cookie,
ceph_pr_addr(&item->addr));
bad:
return ret;
}
static int decode_watchers(void **p, void *end,
struct ceph_watch_item **watchers,
u32 *num_watchers)
{
u8 struct_v;
u32 struct_len;
int i;
int ret;
ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
&struct_v, &struct_len);
if (ret)
return ret;
*num_watchers = ceph_decode_32(p);
*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
if (!*watchers)
return -ENOMEM;
for (i = 0; i < *num_watchers; i++) {
ret = decode_watcher(p, end, *watchers + i);
if (ret) {
kfree(*watchers);
return ret;
}
}
return 0;
}
/*
* On success, the caller is responsible for:
*
* kfree(watchers);
*/
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers)
{
struct ceph_osd_request *req;
struct page **pages;
int ret;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
if (!req)
return -ENOMEM;
ceph_oid_copy(&req->r_base_oid, oid);
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto out_put_req;
}
osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
response_data),
pages, PAGE_SIZE, 0, false, true);
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
void *p = page_address(pages[0]);
void *const end = p + req->r_ops[0].outdata_len;
ret = decode_watchers(&p, end, watchers, num_watchers);
}
out_put_req:
ceph_osdc_put_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
/*
* Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked.
*/
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
dout("%s osdc %p\n", __func__, osdc);
flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
down_read(&osdc->lock);
maybe_request_map(osdc);
up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
* Execute an OSD class method on an object.
*
* @flags: CEPH_OSD_FLAG_*
* @resp_len: in/out param for reply length
*/
int ceph_osdc_call(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
const char *class, const char *method,
unsigned int flags,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len)
{
struct ceph_osd_request *req;
int ret;
if (req_len > PAGE_SIZE)
return -E2BIG;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
if (!req)
return -ENOMEM;
ceph_oid_copy(&req->r_base_oid, oid);
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = flags;
ret = osd_req_op_cls_init(req, 0, class, method);
if (ret)
goto out_put_req;
if (req_page)
osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
0, false, false);
if (resp_pages)
osd_req_op_cls_response_data_pages(req, 0, resp_pages,
*resp_len, 0, false, false);
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
ret = req->r_ops[0].rval;
if (resp_pages)
*resp_len = req->r_ops[0].outdata_len;
}
out_put_req:
ceph_osdc_put_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
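/*
 * Illustrative example (class and method names are placeholders):
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "lock", "get_info",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     &reply_page, &reply_len);
 */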
/*
* reset all osd connections
*/
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{
struct rb_node *n;
down_write(&osdc->lock);
for (n = rb_first(&osdc->osds); n; ) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
n = rb_next(n);
if (!reopen_osd(osd))
kick_osd_requests(osd);
}
up_write(&osdc->lock);
}
/*
* init, shutdown
*/
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
int err;
dout("init\n");
osdc->client = client;
init_rwsem(&osdc->lock);
osdc->osds = RB_ROOT;
INIT_LIST_HEAD(&osdc->osd_lru);
spin_lock_init(&osdc->osd_lru_lock);
osd_init(&osdc->homeless_osd);
osdc->homeless_osd.o_osdc = osdc;
osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
osdc->last_linger_id = CEPH_LINGER_ID_START;
osdc->linger_requests = RB_ROOT;
osdc->map_checks = RB_ROOT;
osdc->linger_map_checks = RB_ROOT;
INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
err = -ENOMEM;
osdc->osdmap = ceph_osdmap_alloc();
if (!osdc->osdmap)
goto out;
osdc->req_mempool = mempool_create_slab_pool(10,
ceph_osd_request_cache);
if (!osdc->req_mempool)
goto out_map;
err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
"osd_op_reply");
if (err < 0)
goto out_msgpool;
err = -ENOMEM;
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
if (!osdc->notify_wq)
goto out_msgpool_reply;
osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
if (!osdc->completion_wq)
goto out_notify_wq;
schedule_delayed_work(&osdc->timeout_work,
osdc->client->options->osd_keepalive_timeout);
schedule_delayed_work(&osdc->osds_timeout_work,
round_jiffies_relative(osdc->client->options->osd_idle_ttl));
return 0;
out_notify_wq:
destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
mempool_destroy(osdc->req_mempool);
out_map:
ceph_osdmap_destroy(osdc->osdmap);
out:
return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
destroy_workqueue(osdc->completion_wq);
destroy_workqueue(osdc->notify_wq);
cancel_delayed_work_sync(&osdc->timeout_work);
cancel_delayed_work_sync(&osdc->osds_timeout_work);
down_write(&osdc->lock);
while (!RB_EMPTY_ROOT(&osdc->osds)) {
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
struct ceph_osd, o_node);
close_osd(osd);
}
up_write(&osdc->lock);
WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
osd_cleanup(&osdc->homeless_osd);
WARN_ON(!list_empty(&osdc->osd_lru));
WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
WARN_ON(atomic_read(&osdc->num_requests));
WARN_ON(atomic_read(&osdc->num_homeless));
ceph_osdmap_destroy(osdc->osdmap);
mempool_destroy(osdc->req_mempool);
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
int osd_req_op_copy_from_init(struct ceph_osd_request *req,
u64 src_snapid, u64 src_version,
struct ceph_object_id *src_oid,
struct ceph_object_locator *src_oloc,
u32 src_fadvise_flags,
u32 dst_fadvise_flags,
u32 truncate_seq, u64 truncate_size,
u8 copy_from_flags)
{
struct ceph_osd_req_op *op;
struct page **pages;
void *p, *end;
pages = ceph_alloc_page_vector(1, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
dst_fadvise_flags);
op->copy_from.snapid = src_snapid;
op->copy_from.src_version = src_version;
op->copy_from.flags = copy_from_flags;
op->copy_from.src_fadvise_flags = src_fadvise_flags;
p = page_address(pages[0]);
end = p + PAGE_SIZE;
ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
encode_oloc(&p, end, src_oloc);
ceph_encode_32(&p, truncate_seq);
ceph_encode_64(&p, truncate_size);
op->indata_len = PAGE_SIZE - (end - p);
ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
op->indata_len, 0, false, true);
return 0;
}
EXPORT_SYMBOL(osd_req_op_copy_from_init);
int __init ceph_osdc_setup(void)
{
size_t size = sizeof(struct ceph_osd_request) +
CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
BUG_ON(ceph_osd_request_cache);
ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
0, 0, NULL);
return ceph_osd_request_cache ? 0 : -ENOMEM;
}
void ceph_osdc_cleanup(void)
{
BUG_ON(!ceph_osd_request_cache);
kmem_cache_destroy(ceph_osd_request_cache);
ceph_osd_request_cache = NULL;
}
/*
* handle incoming message
*/
static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
int type = le16_to_cpu(msg->hdr.type);
switch (type) {
case CEPH_MSG_OSD_MAP:
ceph_osdc_handle_map(osdc, msg);
break;
case CEPH_MSG_OSD_OPREPLY:
handle_reply(osd, msg);
break;
case CEPH_MSG_OSD_BACKOFF:
handle_backoff(osd, msg);
break;
case CEPH_MSG_WATCH_NOTIFY:
handle_watch_notify(osdc, msg);
break;
default:
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
ceph_msg_put(msg);
}
/* How much sparse data was requested? */
static u64 sparse_data_requested(struct ceph_osd_request *req)
{
u64 len = 0;
if (req->r_flags & CEPH_OSD_FLAG_READ) {
int i;
for (i = 0; i < req->r_num_ops; ++i) {
struct ceph_osd_req_op *op = &req->r_ops[i];
if (op->op == CEPH_OSD_OP_SPARSE_READ)
len += op->extent.length;
}
}
return len;
}
/*
* Lookup and return message for incoming reply. Don't try to do
* anything about a larger than preallocated data portion of the
* message at the moment - for now, just skip the message.
*/
static struct ceph_msg *get_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
struct ceph_msg *m = NULL;
struct ceph_osd_request *req;
int front_len = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
u64 tid = le64_to_cpu(hdr->tid);
u64 srlen;
down_read(&osdc->lock);
if (!osd_registered(osd)) {
dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
*skip = 1;
goto out_unlock_osdc;
}
WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
mutex_lock(&osd->lock);
req = lookup_request(&osd->o_requests, tid);
if (!req) {
dout("%s osd%d tid %llu unknown, skipping\n", __func__,
osd->o_osd, tid);
*skip = 1;
goto out_unlock_session;
}
ceph_msg_revoke_incoming(req->r_reply);
if (front_len > req->r_reply->front_alloc_len) {
pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
__func__, osd->o_osd, req->r_tid, front_len,
req->r_reply->front_alloc_len);
m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
false);
if (!m)
goto out_unlock_session;
ceph_msg_put(req->r_reply);
req->r_reply = m;
}
srlen = sparse_data_requested(req);
if (!srlen && data_len > req->r_reply->data_length) {
pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
__func__, osd->o_osd, req->r_tid, data_len,
req->r_reply->data_length);
m = NULL;
*skip = 1;
goto out_unlock_session;
}
m = ceph_msg_get(req->r_reply);
m->sparse_read = (bool)srlen;
dout("get_reply tid %lld %p\n", tid, m);
out_unlock_session:
mutex_unlock(&osd->lock);
out_unlock_osdc:
up_read(&osdc->lock);
return m;
}
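/*
 * Allocate an incoming message and, if a data payload is expected,
 * attach a freshly allocated page vector to receive it.
 */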
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
struct ceph_msg *m;
int type = le16_to_cpu(hdr->type);
u32 front_len = le32_to_cpu(hdr->front_len);
u32 data_len = le32_to_cpu(hdr->data_len);
m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
if (!m)
return NULL;
if (data_len) {
struct page **pages;
pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
GFP_NOIO);
if (IS_ERR(pages)) {
ceph_msg_put(m);
return NULL;
}
ceph_msg_data_add_pages(m, pages, data_len, 0, true);
}
return m;
}
static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_osd *osd = con->private;
int type = le16_to_cpu(hdr->type);
*skip = 0;
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_OSD_BACKOFF:
case CEPH_MSG_WATCH_NOTIFY:
return alloc_msg_with_page_vector(hdr);
case CEPH_MSG_OSD_OPREPLY:
return get_reply(con, hdr, skip);
default:
pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
osd->o_osd, type);
*skip = 1;
return NULL;
}
}
/*
* Wrappers to refcount containing ceph_osd struct
*/
static struct ceph_connection *osd_get_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
if (get_osd(osd))
return con;
return NULL;
}
static void osd_put_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
put_osd(osd);
}
/*
* authentication
*/
/*
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
static struct ceph_auth_handshake *
osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
struct ceph_auth_handshake *auth = &o->o_auth;
int ret;
ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
force_new, proto, NULL, NULL);
if (ret)
return ERR_PTR(ret);
return auth;
}
static int osd_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
challenge_buf, challenge_buf_len);
}
static int osd_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
struct ceph_auth_handshake *auth = &o->o_auth;
return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
NULL, NULL, NULL, NULL);
}
static int osd_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
return ceph_monc_validate_auth(&osdc->client->monc);
}
static int osd_get_auth_request(struct ceph_connection *con,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_osd *o = con->private;
struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
struct ceph_auth_handshake *auth = &o->o_auth;
int ret;
ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
buf, buf_len);
if (ret)
return ret;
*authorizer = auth->authorizer_buf;
*authorizer_len = auth->authorizer_buf_len;
return 0;
}
static int osd_handle_auth_reply_more(struct ceph_connection *con,
void *reply, int reply_len,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_osd *o = con->private;
struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
struct ceph_auth_handshake *auth = &o->o_auth;
int ret;
ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
buf, buf_len);
if (ret)
return ret;
*authorizer = auth->authorizer_buf;
*authorizer_len = auth->authorizer_buf_len;
return 0;
}
static int osd_handle_auth_done(struct ceph_connection *con,
u64 global_id, void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_osd *o = con->private;
struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
struct ceph_auth_handshake *auth = &o->o_auth;
return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
}
static int osd_handle_auth_bad_method(struct ceph_connection *con,
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt)
{
struct ceph_osd *o = con->private;
struct ceph_mon_client *monc = &o->o_osdc->client->monc;
int ret;
if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_OSD,
used_proto, result,
allowed_protos, proto_cnt,
allowed_modes, mode_cnt)) {
ret = ceph_monc_validate_auth(monc);
if (ret)
return ret;
}
return -EACCES;
}
static void osd_reencode_message(struct ceph_msg *msg)
{
int type = le16_to_cpu(msg->hdr.type);
if (type == CEPH_MSG_OSD_OP)
encode_request_finish(msg);
}
static int osd_sign_message(struct ceph_msg *msg)
{
struct ceph_osd *o = msg->con->private;
struct ceph_auth_handshake *auth = &o->o_auth;
return ceph_auth_sign_message(auth, msg);
}
static int osd_check_message_signature(struct ceph_msg *msg)
{
struct ceph_osd *o = msg->con->private;
struct ceph_auth_handshake *auth = &o->o_auth;
return ceph_auth_check_message_signature(auth, msg);
}
static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len,
bool zero)
{
while (len) {
struct page *page;
size_t poff, plen;
page = ceph_msg_data_next(cursor, &poff, &plen);
if (plen > len)
plen = len;
if (zero)
zero_user_segment(page, poff, poff + plen);
len -= plen;
ceph_msg_data_advance(cursor, plen);
}
}
static int prep_next_sparse_read(struct ceph_connection *con,
struct ceph_msg_data_cursor *cursor)
{
struct ceph_osd *o = con->private;
struct ceph_sparse_read *sr = &o->o_sparse_read;
struct ceph_osd_request *req;
struct ceph_osd_req_op *op;
spin_lock(&o->o_requests_lock);
req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid));
if (!req) {
spin_unlock(&o->o_requests_lock);
return -EBADR;
}
if (o->o_sparse_op_idx < 0) {
u64 srlen = sparse_data_requested(req);
dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
__func__, o->o_osd, srlen);
ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
} else {
u64 end;
op = &req->r_ops[o->o_sparse_op_idx];
WARN_ON_ONCE(op->extent.sparse_ext);
/* hand back buffer we took earlier */
op->extent.sparse_ext = sr->sr_extent;
sr->sr_extent = NULL;
op->extent.sparse_ext_cnt = sr->sr_count;
sr->sr_ext_len = 0;
dout("%s: [%d] completed extent array len %d cursor->resid %zd\n",
__func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid);
/* Advance to end of data for this operation */
end = ceph_sparse_ext_map_end(op);
if (end < sr->sr_req_len)
advance_cursor(cursor, sr->sr_req_len - end, false);
}
ceph_init_sparse_read(sr);
/* find next op in this request (if any) */
while (++o->o_sparse_op_idx < req->r_num_ops) {
op = &req->r_ops[o->o_sparse_op_idx];
if (op->op == CEPH_OSD_OP_SPARSE_READ)
goto found;
}
/* reset for next sparse read request */
spin_unlock(&o->o_requests_lock);
o->o_sparse_op_idx = -1;
return 0;
found:
sr->sr_req_off = op->extent.offset;
sr->sr_req_len = op->extent.length;
sr->sr_pos = sr->sr_req_off;
dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__,
o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len);
/* hand off request's sparse extent map buffer */
sr->sr_ext_len = op->extent.sparse_ext_cnt;
op->extent.sparse_ext_cnt = 0;
sr->sr_extent = op->extent.sparse_ext;
op->extent.sparse_ext = NULL;
spin_unlock(&o->o_requests_lock);
return 1;
}
#ifdef __BIG_ENDIAN
static inline void convert_extent_map(struct ceph_sparse_read *sr)
{
int i;
for (i = 0; i < sr->sr_count; i++) {
struct ceph_sparse_extent *ext = &sr->sr_extent[i];
ext->off = le64_to_cpu((__force __le64)ext->off);
ext->len = le64_to_cpu((__force __le64)ext->len);
}
}
#else
static inline void convert_extent_map(struct ceph_sparse_read *sr)
{
}
#endif
#define MAX_EXTENTS 4096
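/*
 * Summary (inferred from the function below): osd_sparse_read() is a
 * state machine driven by the messenger.  Each call returns how many
 * bytes to read next and where to put them (*pbuf, or NULL to read
 * straight into the data cursor):
 *
 *   CEPH_SPARSE_READ_HDR      prep the next sparse-read op and read the
 *                             extent count
 *   CEPH_SPARSE_READ_EXTENTS  read the extent array
 *   CEPH_SPARSE_READ_DATA_LEN read the total data length
 *   CEPH_SPARSE_READ_DATA     read each extent's data in turn, zero-fill
 *                             the gaps between extents, then back to HDR
 */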
static int osd_sparse_read(struct ceph_connection *con,
struct ceph_msg_data_cursor *cursor,
char **pbuf)
{
struct ceph_osd *o = con->private;
struct ceph_sparse_read *sr = &o->o_sparse_read;
u32 count = sr->sr_count;
u64 eoff, elen;
int ret;
switch (sr->sr_state) {
case CEPH_SPARSE_READ_HDR:
next_op:
ret = prep_next_sparse_read(con, cursor);
if (ret <= 0)
return ret;
/* number of extents */
ret = sizeof(sr->sr_count);
*pbuf = (char *)&sr->sr_count;
sr->sr_state = CEPH_SPARSE_READ_EXTENTS;
break;
case CEPH_SPARSE_READ_EXTENTS:
/* Convert sr_count to host-endian */
count = le32_to_cpu((__force __le32)sr->sr_count);
sr->sr_count = count;
dout("[%d] got %u extents\n", o->o_osd, count);
if (count > 0) {
if (!sr->sr_extent || count > sr->sr_ext_len) {
/*
* Apply a hard cap to the number of extents.
* If we have more, assume something is wrong.
*/
if (count > MAX_EXTENTS) {
dout("%s: OSD returned 0x%x extents in a single reply!\n",
__func__, count);
return -EREMOTEIO;
}
/* no extent array provided, or too short */
kfree(sr->sr_extent);
sr->sr_extent = kmalloc_array(count,
sizeof(*sr->sr_extent),
GFP_NOIO);
if (!sr->sr_extent)
return -ENOMEM;
sr->sr_ext_len = count;
}
ret = count * sizeof(*sr->sr_extent);
*pbuf = (char *)sr->sr_extent;
sr->sr_state = CEPH_SPARSE_READ_DATA_LEN;
break;
}
/* No extents? Read data len */
fallthrough;
case CEPH_SPARSE_READ_DATA_LEN:
convert_extent_map(sr);
ret = sizeof(sr->sr_datalen);
*pbuf = (char *)&sr->sr_datalen;
sr->sr_state = CEPH_SPARSE_READ_DATA;
break;
case CEPH_SPARSE_READ_DATA:
if (sr->sr_index >= count) {
sr->sr_state = CEPH_SPARSE_READ_HDR;
goto next_op;
}
eoff = sr->sr_extent[sr->sr_index].off;
elen = sr->sr_extent[sr->sr_index].len;
dout("[%d] ext %d off 0x%llx len 0x%llx\n",
o->o_osd, sr->sr_index, eoff, elen);
if (elen > INT_MAX) {
dout("Sparse read extent length too long (0x%llx)\n",
elen);
return -EREMOTEIO;
}
/* zero out anything from sr_pos to start of extent */
if (sr->sr_pos < eoff)
advance_cursor(cursor, eoff - sr->sr_pos, true);
/* Set position to end of extent */
sr->sr_pos = eoff + elen;
/* send back the new length and nullify the ptr */
cursor->sr_resid = elen;
ret = elen;
*pbuf = NULL;
/* Bump the array index */
++sr->sr_index;
break;
}
return ret;
}
static const struct ceph_connection_operations osd_con_ops = {
.get = osd_get_con,
.put = osd_put_con,
.sparse_read = osd_sparse_read,
.alloc_msg = osd_alloc_msg,
.dispatch = osd_dispatch,
.fault = osd_fault,
.reencode_message = osd_reencode_message,
.get_authorizer = osd_get_authorizer,
.add_authorizer_challenge = osd_add_authorizer_challenge,
.verify_authorizer_reply = osd_verify_authorizer_reply,
.invalidate_authorizer = osd_invalidate_authorizer,
.sign_message = osd_sign_message,
.check_message_signature = osd_check_message_signature,
.get_auth_request = osd_get_auth_request,
.handle_auth_reply_more = osd_handle_auth_reply_more,
.handle_auth_done = osd_handle_auth_done,
.handle_auth_bad_method = osd_handle_auth_bad_method,
};
| linux-master | net/ceph/osd_client.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/ceph/libceph.h>
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
int i;
for (i = 0; i < num_pages; i++) {
if (dirty)
set_page_dirty_lock(pages[i]);
put_page(pages[i]);
}
kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
void ceph_release_page_vector(struct page **pages, int num_pages)
{
int i;
for (i = 0; i < num_pages; i++)
__free_pages(pages[i], 0);
kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
/*
 * allocate a vector of new pages
*/
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
struct page **pages;
int i;
pages = kmalloc_array(num_pages, sizeof(*pages), flags);
if (!pages)
return ERR_PTR(-ENOMEM);
for (i = 0; i < num_pages; i++) {
pages[i] = __page_cache_alloc(flags);
if (pages[i] == NULL) {
ceph_release_page_vector(pages, i);
return ERR_PTR(-ENOMEM);
}
}
return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
/*
* copy user data into a page vector
*/
int ceph_copy_user_to_page_vector(struct page **pages,
const void __user *data,
loff_t off, size_t len)
{
int i = 0;
int po = off & ~PAGE_MASK;
int left = len;
int l, bad;
while (left > 0) {
l = min_t(int, PAGE_SIZE-po, left);
bad = copy_from_user(page_address(pages[i]) + po, data, l);
if (bad == l)
return -EFAULT;
data += l - bad;
left -= l - bad;
po += l - bad;
if (po == PAGE_SIZE) {
po = 0;
i++;
}
}
return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
void ceph_copy_to_page_vector(struct page **pages,
const void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_MASK;
size_t left = len;
while (left > 0) {
size_t l = min_t(size_t, PAGE_SIZE-po, left);
memcpy(page_address(pages[i]) + po, data, l);
data += l;
left -= l;
po += l;
if (po == PAGE_SIZE) {
po = 0;
i++;
}
}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_MASK;
size_t left = len;
while (left > 0) {
size_t l = min_t(size_t, PAGE_SIZE-po, left);
memcpy(data, page_address(pages[i]) + po, l);
data += l;
left -= l;
po += l;
if (po == PAGE_SIZE) {
po = 0;
i++;
}
}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
/*
* Zero an extent within a page vector. Offset is relative to the
* start of the first page.
*/
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
int i = off >> PAGE_SHIFT;
off &= ~PAGE_MASK;
dout("zero_page_vector_page %u~%u\n", off, len);
/* leading partial page? */
if (off) {
int end = min((int)PAGE_SIZE, off + len);
dout("zeroing %d %p head from %d\n", i, pages[i],
(int)off);
zero_user_segment(pages[i], off, end);
len -= (end - off);
i++;
}
while (len >= PAGE_SIZE) {
dout("zeroing %d %p len=%d\n", i, pages[i], len);
zero_user_segment(pages[i], 0, PAGE_SIZE);
len -= PAGE_SIZE;
i++;
}
/* trailing partial page? */
if (len) {
dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
zero_user_segment(pages[i], 0, len);
}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
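/*
 * Hypothetical usage sketch (the function name below is made up): allocate
 * a page vector, copy a kernel buffer into it and free it again.
 */
#if 0
static int pagevec_copy_example(const void *buf, size_t len)
{
	int num_pages = DIV_ROUND_UP(len, PAGE_SIZE);
	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	ceph_copy_to_page_vector(pages, buf, 0, len);
	/* ... hand @pages to a message or request here ... */
	ceph_release_page_vector(pages, num_pages);
	return 0;
}
#endif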
| linux-master | net/ceph/pagevec.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/types.h>
/*
* Map a file extent to a stripe unit within an object.
* Fill in objno, offset into object, and object extent length (i.e. the
* number of bytes mapped, less than or equal to @l->stripe_unit).
*
* Example for stripe_count = 3, stripes_per_object = 4:
*
* blockno | 0 3 6 9 | 1 4 7 10 | 2 5 8 11 | 12 15 18 21 | 13 16 19
* stripeno | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 4 5 6 7 | 4 5 6
* stripepos | 0 | 1 | 2 | 0 | 1
* objno | 0 | 1 | 2 | 3 | 4
* objsetno | 0 | 1
*/
void ceph_calc_file_object_mapping(struct ceph_file_layout *l,
u64 off, u64 len,
u64 *objno, u64 *objoff, u32 *xlen)
{
u32 stripes_per_object = l->object_size / l->stripe_unit;
u64 blockno; /* which su in the file (i.e. globally) */
u32 blockoff; /* offset into su */
u64 stripeno; /* which stripe */
u32 stripepos; /* which su in the stripe,
which object in the object set */
u64 objsetno; /* which object set */
u32 objsetpos; /* which stripe in the object set */
blockno = div_u64_rem(off, l->stripe_unit, &blockoff);
stripeno = div_u64_rem(blockno, l->stripe_count, &stripepos);
objsetno = div_u64_rem(stripeno, stripes_per_object, &objsetpos);
*objno = objsetno * l->stripe_count + stripepos;
*objoff = objsetpos * l->stripe_unit + blockoff;
*xlen = min_t(u64, len, l->stripe_unit - blockoff);
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
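/*
 * Illustrative worked example (layout values assumed purely for the
 * arithmetic): with stripe_unit = 4096, stripe_count = 3 and
 * object_size = 16384 (i.e. stripes_per_object = 4), mapping off = 5000
 * gives
 *
 *   blockno  = 5000 / 4096 = 1     blockoff  = 904
 *   stripeno = 1 / 3 = 0           stripepos = 1
 *   objsetno = 0 / 4 = 0           objsetpos = 0
 *
 *   *objno  = 0 * 3 + 1 = 1
 *   *objoff = 0 * 4096 + 904 = 904
 *   *xlen   = min(len, 4096 - 904), i.e. at most 3192
 *
 * which matches the layout table above: file block 1 lives in object 1.
 */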
/*
* Return the last extent with given objno (@object_extents is sorted
* by objno). If not found, return NULL and set @add_pos so that the
* new extent can be added with list_add(add_pos, new_ex).
*/
static struct ceph_object_extent *
lookup_last(struct list_head *object_extents, u64 objno,
struct list_head **add_pos)
{
struct list_head *pos;
list_for_each_prev(pos, object_extents) {
struct ceph_object_extent *ex =
list_entry(pos, typeof(*ex), oe_item);
if (ex->oe_objno == objno)
return ex;
if (ex->oe_objno < objno)
break;
}
*add_pos = pos;
return NULL;
}
static struct ceph_object_extent *
lookup_containing(struct list_head *object_extents, u64 objno,
u64 objoff, u32 xlen)
{
struct ceph_object_extent *ex;
list_for_each_entry(ex, object_extents, oe_item) {
if (ex->oe_objno == objno &&
ex->oe_off <= objoff &&
ex->oe_off + ex->oe_len >= objoff + xlen) /* paranoia */
return ex;
if (ex->oe_objno > objno)
break;
}
return NULL;
}
/*
* Map a file extent to a sorted list of object extents.
*
 * We want only one (or as few as possible) object extents per object.
 * Adjacent object extents will be merged together; each returned object
 * extent may reverse map to multiple different file extents.
*
* Call @alloc_fn for each new object extent and @action_fn for each
* mapped stripe unit, whether it was merged into an already allocated
* object extent or started a new object extent.
*
* Newly allocated object extents are added to @object_extents.
* To keep @object_extents sorted, successive calls to this function
* must map successive file extents (i.e. the list of file extents that
* are mapped using the same @object_extents must be sorted).
*
* The caller is responsible for @object_extents.
*/
int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len,
struct list_head *object_extents,
struct ceph_object_extent *alloc_fn(void *arg),
void *alloc_arg,
ceph_object_extent_fn_t action_fn,
void *action_arg)
{
struct ceph_object_extent *last_ex, *ex;
while (len) {
struct list_head *add_pos = NULL;
u64 objno, objoff;
u32 xlen;
ceph_calc_file_object_mapping(l, off, len, &objno, &objoff,
&xlen);
last_ex = lookup_last(object_extents, objno, &add_pos);
if (!last_ex || last_ex->oe_off + last_ex->oe_len != objoff) {
ex = alloc_fn(alloc_arg);
if (!ex)
return -ENOMEM;
ex->oe_objno = objno;
ex->oe_off = objoff;
ex->oe_len = xlen;
if (action_fn)
action_fn(ex, xlen, action_arg);
if (!last_ex)
list_add(&ex->oe_item, add_pos);
else
list_add(&ex->oe_item, &last_ex->oe_item);
} else {
last_ex->oe_len += xlen;
if (action_fn)
action_fn(last_ex, xlen, action_arg);
}
off += xlen;
len -= xlen;
}
for (last_ex = list_first_entry(object_extents, typeof(*ex), oe_item),
ex = list_next_entry(last_ex, oe_item);
&ex->oe_item != object_extents;
last_ex = ex, ex = list_next_entry(ex, oe_item)) {
if (last_ex->oe_objno > ex->oe_objno ||
(last_ex->oe_objno == ex->oe_objno &&
last_ex->oe_off + last_ex->oe_len >= ex->oe_off)) {
WARN(1, "%s: object_extents list not sorted!\n",
__func__);
return -EINVAL;
}
}
return 0;
}
EXPORT_SYMBOL(ceph_file_to_extents);
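/*
 * Hypothetical usage sketch (my_obj_extent, my_alloc and my_count are
 * made-up names): a caller can tally how many bytes of a file range land
 * in each object by embedding ceph_object_extent in its own struct.
 */
#if 0
struct my_obj_extent {
	struct ceph_object_extent oe;
	u64 bytes;		/* bytes of the file range mapped to this object */
};
static struct ceph_object_extent *my_alloc(void *arg)
{
	struct my_obj_extent *ex = kzalloc(sizeof(*ex), GFP_NOIO);
	return ex ? &ex->oe : NULL;
}
static void my_count(struct ceph_object_extent *oe, u32 bytes, void *arg)
{
	struct my_obj_extent *ex = container_of(oe, struct my_obj_extent, oe);
	ex->bytes += bytes;
}
/* call site: ceph_file_to_extents(layout, off, len, &obj_extents,
 *			 my_alloc, NULL, my_count, NULL); */
#endif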
/*
* A stripped down, non-allocating version of ceph_file_to_extents(),
* for when @object_extents is already populated.
*/
int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len,
struct list_head *object_extents,
ceph_object_extent_fn_t action_fn,
void *action_arg)
{
while (len) {
struct ceph_object_extent *ex;
u64 objno, objoff;
u32 xlen;
ceph_calc_file_object_mapping(l, off, len, &objno, &objoff,
&xlen);
ex = lookup_containing(object_extents, objno, objoff, xlen);
if (!ex) {
WARN(1, "%s: objno %llu %llu~%u not found!\n",
__func__, objno, objoff, xlen);
return -EINVAL;
}
action_fn(ex, xlen, action_arg);
off += xlen;
len -= xlen;
}
return 0;
}
EXPORT_SYMBOL(ceph_iterate_extents);
/*
* Reverse map an object extent to a sorted list of file extents.
*
* On success, the caller is responsible for:
*
* kfree(file_extents)
*/
int ceph_extent_to_file(struct ceph_file_layout *l,
u64 objno, u64 objoff, u64 objlen,
struct ceph_file_extent **file_extents,
u32 *num_file_extents)
{
u32 stripes_per_object = l->object_size / l->stripe_unit;
u64 blockno; /* which su */
u32 blockoff; /* offset into su */
u64 stripeno; /* which stripe */
u32 stripepos; /* which su in the stripe,
which object in the object set */
u64 objsetno; /* which object set */
u32 i = 0;
if (!objlen) {
*file_extents = NULL;
*num_file_extents = 0;
return 0;
}
*num_file_extents = DIV_ROUND_UP_ULL(objoff + objlen, l->stripe_unit) -
DIV_ROUND_DOWN_ULL(objoff, l->stripe_unit);
*file_extents = kmalloc_array(*num_file_extents, sizeof(**file_extents),
GFP_NOIO);
if (!*file_extents)
return -ENOMEM;
div_u64_rem(objoff, l->stripe_unit, &blockoff);
while (objlen) {
u64 off, len;
objsetno = div_u64_rem(objno, l->stripe_count, &stripepos);
stripeno = div_u64(objoff, l->stripe_unit) +
objsetno * stripes_per_object;
blockno = stripeno * l->stripe_count + stripepos;
off = blockno * l->stripe_unit + blockoff;
len = min_t(u64, objlen, l->stripe_unit - blockoff);
(*file_extents)[i].fe_off = off;
(*file_extents)[i].fe_len = len;
blockoff = 0;
objoff += len;
objlen -= len;
i++;
}
BUG_ON(i != *num_file_extents);
return 0;
}
EXPORT_SYMBOL(ceph_extent_to_file);
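/*
 * Illustrative worked example, using the same assumed layout as above
 * (stripe_unit = 4096, stripe_count = 3, object_size = 16384): objno = 1,
 * objoff = 0, objlen = 8192 yields *num_file_extents = 2 with
 *
 *   [0]: fe_off =  4096, fe_len = 4096   (file block 1)
 *   [1]: fe_off = 16384, fe_len = 4096   (file block 4)
 *
 * i.e. the two stripe units that object 1 holds in the first two stripes.
 */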
u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size)
{
u64 period = (u64)l->stripe_count * l->object_size;
u64 num_periods = DIV64_U64_ROUND_UP(size, period);
u64 remainder_bytes;
u64 remainder_objs = 0;
div64_u64_rem(size, period, &remainder_bytes);
if (remainder_bytes > 0 &&
remainder_bytes < (u64)l->stripe_count * l->stripe_unit)
remainder_objs = l->stripe_count -
DIV_ROUND_UP_ULL(remainder_bytes, l->stripe_unit);
return num_periods * l->stripe_count - remainder_objs;
}
EXPORT_SYMBOL(ceph_get_num_objects);
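/*
 * Illustrative worked example with the same assumed layout (stripe_unit =
 * 4096, stripe_count = 3, object_size = 16384): period = 3 * 16384 =
 * 49152, so for size = 50000
 *
 *   num_periods     = DIV_ROUND_UP(50000, 49152) = 2
 *   remainder_bytes = 50000 - 49152 = 848   (< 3 * 4096)
 *   remainder_objs  = 3 - DIV_ROUND_UP(848, 4096) = 2
 *
 * giving 2 * 3 - 2 = 4 objects: objects 0-2 are full and only object 3 in
 * the second object set is touched.
 */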
| linux-master | net/ceph/striper.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include "crypto.h"
#include "auth_x.h"
#include "auth_x_protocol.h"
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
int missing;
int need; /* missing + need renewal */
ceph_x_validate_tickets(ac, &need);
missing = ac->want_keys & ~xi->have_keys;
WARN_ON((need & missing) != missing);
dout("%s want 0x%x have 0x%x missing 0x%x -> %d\n", __func__,
ac->want_keys, xi->have_keys, missing, !missing);
return !missing;
}
static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
int need;
ceph_x_validate_tickets(ac, &need);
dout("%s want 0x%x have 0x%x need 0x%x -> %d\n", __func__,
ac->want_keys, xi->have_keys, need, !!need);
return !!need;
}
static int ceph_x_encrypt_offset(void)
{
return sizeof(u32) + sizeof(struct ceph_x_encrypt_header);
}
static int ceph_x_encrypt_buflen(int ilen)
{
return ceph_x_encrypt_offset() + ilen + 16;
}
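/*
 * Layout note (inferred from the helpers above and ceph_x_encrypt()
 * below): an encrypted blob on the wire is
 *
 *   __le32 ciphertext_len | ceph_x_encrypt_header | payload | pad
 *
 * where the header, payload and up to one AES block (16 bytes) of padding
 * make up the ciphertext, hence the "+ 16" in ceph_x_encrypt_buflen().
 */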
static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
int buf_len, int plaintext_len)
{
struct ceph_x_encrypt_header *hdr = buf + sizeof(u32);
int ciphertext_len;
int ret;
hdr->struct_v = 1;
hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC);
ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32),
plaintext_len + sizeof(struct ceph_x_encrypt_header),
&ciphertext_len);
if (ret)
return ret;
ceph_encode_32(&buf, ciphertext_len);
return sizeof(u32) + ciphertext_len;
}
static int __ceph_x_decrypt(struct ceph_crypto_key *secret, void *p,
int ciphertext_len)
{
struct ceph_x_encrypt_header *hdr = p;
int plaintext_len;
int ret;
ret = ceph_crypt(secret, false, p, ciphertext_len, ciphertext_len,
&plaintext_len);
if (ret)
return ret;
if (le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC) {
pr_err("%s bad magic\n", __func__);
return -EINVAL;
}
return plaintext_len - sizeof(*hdr);
}
static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
{
int ciphertext_len;
int ret;
ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
ceph_decode_need(p, end, ciphertext_len, e_inval);
ret = __ceph_x_decrypt(secret, *p, ciphertext_len);
if (ret < 0)
return ret;
*p += ciphertext_len;
return ret;
e_inval:
return -EINVAL;
}
/*
* get existing (or insert new) ticket handler
*/
static struct ceph_x_ticket_handler *
get_ticket_handler(struct ceph_auth_client *ac, int service)
{
struct ceph_x_ticket_handler *th;
struct ceph_x_info *xi = ac->private;
struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node;
while (*p) {
parent = *p;
th = rb_entry(parent, struct ceph_x_ticket_handler, node);
if (service < th->service)
p = &(*p)->rb_left;
else if (service > th->service)
p = &(*p)->rb_right;
else
return th;
}
/* add it */
th = kzalloc(sizeof(*th), GFP_NOFS);
if (!th)
return ERR_PTR(-ENOMEM);
th->service = service;
rb_link_node(&th->node, parent, p);
rb_insert_color(&th->node, &xi->ticket_handlers);
return th;
}
static void remove_ticket_handler(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th)
{
struct ceph_x_info *xi = ac->private;
dout("remove_ticket_handler %p %d\n", th, th->service);
rb_erase(&th->node, &xi->ticket_handlers);
ceph_crypto_key_destroy(&th->session_key);
if (th->ticket_blob)
ceph_buffer_put(th->ticket_blob);
kfree(th);
}
static int process_one_ticket(struct ceph_auth_client *ac,
struct ceph_crypto_key *secret,
void **p, void *end)
{
struct ceph_x_info *xi = ac->private;
int type;
u8 tkt_struct_v, blob_struct_v;
struct ceph_x_ticket_handler *th;
void *dp, *dend;
int dlen;
char is_enc;
struct timespec64 validity;
void *tp, *tpend;
void **ptp;
struct ceph_crypto_key new_session_key = { 0 };
struct ceph_buffer *new_ticket_blob;
time64_t new_expires, new_renew_after;
u64 new_secret_id;
int ret;
ceph_decode_need(p, end, sizeof(u32) + 1, bad);
type = ceph_decode_32(p);
dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
tkt_struct_v = ceph_decode_8(p);
if (tkt_struct_v != 1)
goto bad;
th = get_ticket_handler(ac, type);
if (IS_ERR(th)) {
ret = PTR_ERR(th);
goto out;
}
/* blob for me */
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(secret, p, end);
if (ret < 0)
goto out;
dout(" decrypted %d bytes\n", ret);
dend = dp + ret;
ceph_decode_8_safe(&dp, dend, tkt_struct_v, bad);
if (tkt_struct_v != 1)
goto bad;
ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
if (ret)
goto out;
ceph_decode_need(&dp, dend, sizeof(struct ceph_timespec), bad);
ceph_decode_timespec64(&validity, dp);
dp += sizeof(struct ceph_timespec);
new_expires = ktime_get_real_seconds() + validity.tv_sec;
new_renew_after = new_expires - (validity.tv_sec / 4);
dout(" expires=%llu renew_after=%llu\n", new_expires,
new_renew_after);
/* ticket blob for service */
ceph_decode_8_safe(p, end, is_enc, bad);
if (is_enc) {
/* encrypted */
tp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(&th->session_key, p, end);
if (ret < 0)
goto out;
dout(" encrypted ticket, decrypted %d bytes\n", ret);
ptp = &tp;
tpend = tp + ret;
} else {
/* unencrypted */
ptp = p;
tpend = end;
}
ceph_decode_32_safe(ptp, tpend, dlen, bad);
dout(" ticket blob is %d bytes\n", dlen);
ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
blob_struct_v = ceph_decode_8(ptp);
if (blob_struct_v != 1)
goto bad;
new_secret_id = ceph_decode_64(ptp);
ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
if (ret)
goto out;
/* all is well, update our ticket */
ceph_crypto_key_destroy(&th->session_key);
if (th->ticket_blob)
ceph_buffer_put(th->ticket_blob);
th->session_key = new_session_key;
th->ticket_blob = new_ticket_blob;
th->secret_id = new_secret_id;
th->expires = new_expires;
th->renew_after = new_renew_after;
th->have_key = true;
dout(" got ticket service %d (%s) secret_id %lld len %d\n",
type, ceph_entity_type_name(type), th->secret_id,
(int)th->ticket_blob->vec.iov_len);
xi->have_keys |= th->service;
return 0;
bad:
ret = -EINVAL;
out:
ceph_crypto_key_destroy(&new_session_key);
return ret;
}
static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
struct ceph_crypto_key *secret,
void **p, void *end)
{
u8 reply_struct_v;
u32 num;
int ret;
ceph_decode_8_safe(p, end, reply_struct_v, bad);
if (reply_struct_v != 1)
return -EINVAL;
ceph_decode_32_safe(p, end, num, bad);
dout("%d tickets\n", num);
while (num--) {
ret = process_one_ticket(ac, secret, p, end);
if (ret)
return ret;
}
return 0;
bad:
return -EINVAL;
}
/*
* Encode and encrypt the second part (ceph_x_authorize_b) of the
* authorizer. The first part (ceph_x_authorize_a) should already be
* encoded.
*/
static int encrypt_authorizer(struct ceph_x_authorizer *au,
u64 *server_challenge)
{
struct ceph_x_authorize_a *msg_a;
struct ceph_x_authorize_b *msg_b;
void *p, *end;
int ret;
msg_a = au->buf->vec.iov_base;
WARN_ON(msg_a->ticket_blob.secret_id != cpu_to_le64(au->secret_id));
p = (void *)(msg_a + 1) + le32_to_cpu(msg_a->ticket_blob.blob_len);
end = au->buf->vec.iov_base + au->buf->vec.iov_len;
msg_b = p + ceph_x_encrypt_offset();
msg_b->struct_v = 2;
msg_b->nonce = cpu_to_le64(au->nonce);
if (server_challenge) {
msg_b->have_challenge = 1;
msg_b->server_challenge_plus_one =
cpu_to_le64(*server_challenge + 1);
} else {
msg_b->have_challenge = 0;
msg_b->server_challenge_plus_one = 0;
}
ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
if (ret < 0)
return ret;
p += ret;
if (server_challenge) {
WARN_ON(p != end);
} else {
WARN_ON(p > end);
au->buf->vec.iov_len = p - au->buf->vec.iov_base;
}
return 0;
}
static void ceph_x_authorizer_cleanup(struct ceph_x_authorizer *au)
{
ceph_crypto_key_destroy(&au->session_key);
if (au->buf) {
ceph_buffer_put(au->buf);
au->buf = NULL;
}
}
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th,
struct ceph_x_authorizer *au)
{
int maxlen;
struct ceph_x_authorize_a *msg_a;
struct ceph_x_authorize_b *msg_b;
int ret;
int ticket_blob_len =
(th->ticket_blob ? th->ticket_blob->vec.iov_len : 0);
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
ceph_crypto_key_destroy(&au->session_key);
ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
if (ret)
goto out_au;
maxlen = sizeof(*msg_a) + ticket_blob_len +
ceph_x_encrypt_buflen(sizeof(*msg_b));
dout(" need len %d\n", maxlen);
if (au->buf && au->buf->alloc_len < maxlen) {
ceph_buffer_put(au->buf);
au->buf = NULL;
}
if (!au->buf) {
au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
if (!au->buf) {
ret = -ENOMEM;
goto out_au;
}
}
au->service = th->service;
WARN_ON(!th->secret_id);
au->secret_id = th->secret_id;
msg_a = au->buf->vec.iov_base;
msg_a->struct_v = 1;
msg_a->global_id = cpu_to_le64(ac->global_id);
msg_a->service_id = cpu_to_le32(th->service);
msg_a->ticket_blob.struct_v = 1;
msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id);
msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len);
if (ticket_blob_len) {
memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base,
th->ticket_blob->vec.iov_len);
}
dout(" th %p secret_id %lld %lld\n", th, th->secret_id,
le64_to_cpu(msg_a->ticket_blob.secret_id));
get_random_bytes(&au->nonce, sizeof(au->nonce));
ret = encrypt_authorizer(au, NULL);
if (ret) {
pr_err("failed to encrypt authorizer: %d", ret);
goto out_au;
}
dout(" built authorizer nonce %llx len %d\n", au->nonce,
(int)au->buf->vec.iov_len);
return 0;
out_au:
ceph_x_authorizer_cleanup(au);
return ret;
}
static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th,
void **p, void *end)
{
ceph_decode_need(p, end, 1 + sizeof(u64), bad);
ceph_encode_8(p, 1);
ceph_encode_64(p, th->secret_id);
if (th->ticket_blob) {
const char *buf = th->ticket_blob->vec.iov_base;
u32 len = th->ticket_blob->vec.iov_len;
ceph_encode_32_safe(p, end, len, bad);
ceph_encode_copy_safe(p, end, buf, len, bad);
} else {
ceph_encode_32_safe(p, end, 0, bad);
}
return 0;
bad:
return -ERANGE;
}
static bool need_key(struct ceph_x_ticket_handler *th)
{
if (!th->have_key)
return true;
return ktime_get_real_seconds() >= th->renew_after;
}
static bool have_key(struct ceph_x_ticket_handler *th)
{
if (th->have_key && ktime_get_real_seconds() >= th->expires) {
dout("ticket %d (%s) secret_id %llu expired\n", th->service,
ceph_entity_type_name(th->service), th->secret_id);
th->have_key = false;
}
return th->have_key;
}
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
{
int want = ac->want_keys;
struct ceph_x_info *xi = ac->private;
int service;
*pneed = ac->want_keys & ~(xi->have_keys);
for (service = 1; service <= want; service <<= 1) {
struct ceph_x_ticket_handler *th;
if (!(ac->want_keys & service))
continue;
if (*pneed & service)
continue;
th = get_ticket_handler(ac, service);
if (IS_ERR(th)) {
*pneed |= service;
continue;
}
if (need_key(th))
*pneed |= service;
if (!have_key(th))
xi->have_keys &= ~service;
}
}
static int ceph_x_build_request(struct ceph_auth_client *ac,
void *buf, void *end)
{
struct ceph_x_info *xi = ac->private;
int need;
struct ceph_x_request_header *head = buf;
void *p;
int ret;
struct ceph_x_ticket_handler *th =
get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
if (IS_ERR(th))
return PTR_ERR(th);
ceph_x_validate_tickets(ac, &need);
dout("%s want 0x%x have 0x%x need 0x%x\n", __func__, ac->want_keys,
xi->have_keys, need);
if (need & CEPH_ENTITY_TYPE_AUTH) {
struct ceph_x_authenticate *auth = (void *)(head + 1);
void *enc_buf = xi->auth_authorizer.enc_buf;
struct ceph_x_challenge_blob *blob = enc_buf +
ceph_x_encrypt_offset();
u64 *u;
p = auth + 1;
if (p > end)
return -ERANGE;
dout(" get_auth_session_key\n");
head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY);
/* encrypt and hash */
get_random_bytes(&auth->client_challenge, sizeof(u64));
blob->client_challenge = auth->client_challenge;
blob->server_challenge = cpu_to_le64(xi->server_challenge);
ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN,
sizeof(*blob));
if (ret < 0)
return ret;
auth->struct_v = 3; /* nautilus+ */
auth->key = 0;
for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
auth->key ^= *(__le64 *)u;
dout(" server_challenge %llx client_challenge %llx key %llx\n",
xi->server_challenge, le64_to_cpu(auth->client_challenge),
le64_to_cpu(auth->key));
		/* now encode the old ticket, if one exists */
ret = ceph_x_encode_ticket(th, &p, end);
if (ret < 0)
return ret;
/* nautilus+: request service tickets at the same time */
need = ac->want_keys & ~CEPH_ENTITY_TYPE_AUTH;
WARN_ON(!need);
ceph_encode_32_safe(&p, end, need, e_range);
return p - buf;
}
if (need) {
dout(" get_principal_session_key\n");
ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
if (ret)
return ret;
p = buf;
ceph_encode_16_safe(&p, end, CEPHX_GET_PRINCIPAL_SESSION_KEY,
e_range);
ceph_encode_copy_safe(&p, end,
xi->auth_authorizer.buf->vec.iov_base,
xi->auth_authorizer.buf->vec.iov_len, e_range);
ceph_encode_8_safe(&p, end, 1, e_range);
ceph_encode_32_safe(&p, end, need, e_range);
return p - buf;
}
return 0;
e_range:
return -ERANGE;
}
static int decode_con_secret(void **p, void *end, u8 *con_secret,
int *con_secret_len)
{
int len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
dout("%s len %d\n", __func__, len);
if (con_secret) {
if (len > CEPH_MAX_CON_SECRET_LEN) {
pr_err("connection secret too big %d\n", len);
goto bad_memzero;
}
memcpy(con_secret, *p, len);
*con_secret_len = len;
}
memzero_explicit(*p, len);
*p += len;
return 0;
bad_memzero:
memzero_explicit(*p, len);
bad:
pr_err("failed to decode connection secret\n");
return -EINVAL;
}
static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
void **p, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_x_info *xi = ac->private;
struct ceph_x_ticket_handler *th;
void *dp, *dend;
int len;
int ret;
/* AUTH ticket */
ret = ceph_x_proc_ticket_reply(ac, &xi->secret, p, end);
if (ret)
return ret;
ceph_auth_set_global_id(ac, global_id);
if (*p == end) {
/* pre-nautilus (or didn't request service tickets!) */
WARN_ON(session_key || con_secret);
return 0;
}
th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
if (IS_ERR(th))
return PTR_ERR(th);
if (session_key) {
memcpy(session_key, th->session_key.key, th->session_key.len);
*session_key_len = th->session_key.len;
}
/* connection secret */
ceph_decode_32_safe(p, end, len, e_inval);
dout("%s connection secret blob len %d\n", __func__, len);
if (len > 0) {
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(&th->session_key, p, *p + len);
if (ret < 0)
return ret;
dout("%s decrypted %d bytes\n", __func__, ret);
dend = dp + ret;
ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
if (ret)
return ret;
}
/* service tickets */
ceph_decode_32_safe(p, end, len, e_inval);
dout("%s service tickets blob len %d\n", __func__, len);
if (len > 0) {
ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
p, *p + len);
if (ret)
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_x_info *xi = ac->private;
struct ceph_x_ticket_handler *th;
int len = end - buf;
int result;
void *p;
int op;
int ret;
if (xi->starting) {
/* it's a hello */
struct ceph_x_server_challenge *sc = buf;
if (len != sizeof(*sc))
return -EINVAL;
xi->server_challenge = le64_to_cpu(sc->server_challenge);
dout("handle_reply got server challenge %llx\n",
xi->server_challenge);
xi->starting = false;
xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH;
return -EAGAIN;
}
p = buf;
ceph_decode_16_safe(&p, end, op, e_inval);
ceph_decode_32_safe(&p, end, result, e_inval);
dout("handle_reply op %d result %d\n", op, result);
switch (op) {
case CEPHX_GET_AUTH_SESSION_KEY:
/* AUTH ticket + [connection secret] + service tickets */
ret = handle_auth_session_key(ac, global_id, &p, end,
session_key, session_key_len,
con_secret, con_secret_len);
break;
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
if (IS_ERR(th))
return PTR_ERR(th);
/* service tickets */
ret = ceph_x_proc_ticket_reply(ac, &th->session_key, &p, end);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
if (ac->want_keys == xi->have_keys)
return 0;
return -EAGAIN;
e_inval:
return -EINVAL;
}
static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
{
struct ceph_x_authorizer *au = (void *)a;
ceph_x_authorizer_cleanup(au);
kfree(au);
}
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
struct ceph_x_authorizer *au;
struct ceph_x_ticket_handler *th;
int ret;
th = get_ticket_handler(ac, peer_type);
if (IS_ERR(th))
return PTR_ERR(th);
au = kzalloc(sizeof(*au), GFP_NOFS);
if (!au)
return -ENOMEM;
au->base.destroy = ceph_x_destroy_authorizer;
ret = ceph_x_build_authorizer(ac, th, au);
if (ret) {
kfree(au);
return ret;
}
auth->authorizer = (struct ceph_authorizer *) au;
auth->authorizer_buf = au->buf->vec.iov_base;
auth->authorizer_buf_len = au->buf->vec.iov_len;
auth->authorizer_reply_buf = au->enc_buf;
auth->authorizer_reply_buf_len = CEPHX_AU_ENC_BUF_LEN;
auth->sign_message = ac->ops->sign_message;
auth->check_message_signature = ac->ops->check_message_signature;
return 0;
}
static int ceph_x_update_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
struct ceph_x_authorizer *au;
struct ceph_x_ticket_handler *th;
th = get_ticket_handler(ac, peer_type);
if (IS_ERR(th))
return PTR_ERR(th);
au = (struct ceph_x_authorizer *)auth->authorizer;
if (au->secret_id < th->secret_id) {
dout("ceph_x_update_authorizer service %u secret %llu < %llu\n",
au->service, au->secret_id, th->secret_id);
return ceph_x_build_authorizer(ac, th, au);
}
return 0;
}
/*
* CephXAuthorizeChallenge
*/
static int decrypt_authorizer_challenge(struct ceph_crypto_key *secret,
void *challenge, int challenge_len,
u64 *server_challenge)
{
void *dp, *dend;
int ret;
/* no leading len */
ret = __ceph_x_decrypt(secret, challenge, challenge_len);
if (ret < 0)
return ret;
dout("%s decrypted %d bytes\n", __func__, ret);
dp = challenge + sizeof(struct ceph_x_encrypt_header);
dend = dp + ret;
ceph_decode_skip_8(&dp, dend, e_inval); /* struct_v */
ceph_decode_64_safe(&dp, dend, *server_challenge, e_inval);
dout("%s server_challenge %llu\n", __func__, *server_challenge);
return 0;
e_inval:
return -EINVAL;
}
static int ceph_x_add_authorizer_challenge(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *challenge, int challenge_len)
{
struct ceph_x_authorizer *au = (void *)a;
u64 server_challenge;
int ret;
ret = decrypt_authorizer_challenge(&au->session_key, challenge,
challenge_len, &server_challenge);
if (ret) {
pr_err("failed to decrypt authorize challenge: %d", ret);
return ret;
}
ret = encrypt_authorizer(au, &server_challenge);
if (ret) {
pr_err("failed to encrypt authorizer w/ challenge: %d", ret);
return ret;
}
return 0;
}
/*
* CephXAuthorizeReply
*/
static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
void **p, void *end, u64 *nonce_plus_one,
u8 *con_secret, int *con_secret_len)
{
void *dp, *dend;
u8 struct_v;
int ret;
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(secret, p, end);
if (ret < 0)
return ret;
dout("%s decrypted %d bytes\n", __func__, ret);
dend = dp + ret;
ceph_decode_8_safe(&dp, dend, struct_v, e_inval);
ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
if (struct_v >= 2) {
ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
if (ret)
return ret;
}
return 0;
e_inval:
return -EINVAL;
}
static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_x_authorizer *au = (void *)a;
u64 nonce_plus_one;
int ret;
if (session_key) {
memcpy(session_key, au->session_key.key, au->session_key.len);
*session_key_len = au->session_key.len;
}
ret = decrypt_authorizer_reply(&au->session_key, &reply,
reply + reply_len, &nonce_plus_one,
con_secret, con_secret_len);
if (ret)
return ret;
if (nonce_plus_one != au->nonce + 1) {
pr_err("failed to authenticate server\n");
return -EPERM;
}
return 0;
}
static void ceph_x_reset(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
dout("reset\n");
xi->starting = true;
xi->server_challenge = 0;
}
static void ceph_x_destroy(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
struct rb_node *p;
dout("ceph_x_destroy %p\n", ac);
ceph_crypto_key_destroy(&xi->secret);
while ((p = rb_first(&xi->ticket_handlers)) != NULL) {
struct ceph_x_ticket_handler *th =
rb_entry(p, struct ceph_x_ticket_handler, node);
remove_ticket_handler(ac, th);
}
ceph_x_authorizer_cleanup(&xi->auth_authorizer);
kfree(ac->private);
ac->private = NULL;
}
static void invalidate_ticket(struct ceph_auth_client *ac, int peer_type)
{
struct ceph_x_ticket_handler *th;
th = get_ticket_handler(ac, peer_type);
if (IS_ERR(th))
return;
if (th->have_key) {
dout("ticket %d (%s) secret_id %llu invalidated\n",
th->service, ceph_entity_type_name(th->service),
th->secret_id);
th->have_key = false;
}
}
static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type)
{
	/*
	 * We are asked to invalidate a service ticket in the hope of
	 * getting a new, hopefully more valid, one.  But we won't get
	 * it unless our AUTH ticket is good, so invalidate the AUTH
	 * ticket as well, just in case.
	 */
invalidate_ticket(ac, peer_type);
invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH);
}
static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
__le64 *psig)
{
void *enc_buf = au->enc_buf;
int ret;
if (!CEPH_HAVE_FEATURE(msg->con->peer_features, CEPHX_V2)) {
struct {
__le32 len;
__le32 header_crc;
__le32 front_crc;
__le32 middle_crc;
__le32 data_crc;
} __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
sigblock->len = cpu_to_le32(4*sizeof(u32));
sigblock->header_crc = msg->hdr.crc;
sigblock->front_crc = msg->footer.front_crc;
sigblock->middle_crc = msg->footer.middle_crc;
sigblock->data_crc = msg->footer.data_crc;
ret = ceph_x_encrypt(&au->session_key, enc_buf,
CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock));
if (ret < 0)
return ret;
*psig = *(__le64 *)(enc_buf + sizeof(u32));
} else {
struct {
__le32 header_crc;
__le32 front_crc;
__le32 front_len;
__le32 middle_crc;
__le32 middle_len;
__le32 data_crc;
__le32 data_len;
__le32 seq_lower_word;
} __packed *sigblock = enc_buf;
struct {
__le64 a, b, c, d;
} __packed *penc = enc_buf;
int ciphertext_len;
sigblock->header_crc = msg->hdr.crc;
sigblock->front_crc = msg->footer.front_crc;
sigblock->front_len = msg->hdr.front_len;
sigblock->middle_crc = msg->footer.middle_crc;
sigblock->middle_len = msg->hdr.middle_len;
sigblock->data_crc = msg->footer.data_crc;
sigblock->data_len = msg->hdr.data_len;
sigblock->seq_lower_word = *(__le32 *)&msg->hdr.seq;
/* no leading len, no ceph_x_encrypt_header */
ret = ceph_crypt(&au->session_key, true, enc_buf,
CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock),
&ciphertext_len);
if (ret)
return ret;
*psig = penc->a ^ penc->b ^ penc->c ^ penc->d;
}
return 0;
}
static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
struct ceph_msg *msg)
{
__le64 sig;
int ret;
if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
return 0;
ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
msg, &sig);
if (ret)
return ret;
msg->footer.sig = sig;
msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
return 0;
}
static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
struct ceph_msg *msg)
{
__le64 sig_check;
int ret;
if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
return 0;
ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
msg, &sig_check);
if (ret)
return ret;
if (sig_check == msg->footer.sig)
return 0;
if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED)
dout("ceph_x_check_message_signature %p has signature %llx "
"expect %llx\n", msg, msg->footer.sig, sig_check);
else
dout("ceph_x_check_message_signature %p sender did not set "
"CEPH_MSG_FOOTER_SIGNED\n", msg);
return -EBADMSG;
}
static const struct ceph_auth_client_ops ceph_x_ops = {
.is_authenticated = ceph_x_is_authenticated,
.should_authenticate = ceph_x_should_authenticate,
.build_request = ceph_x_build_request,
.handle_reply = ceph_x_handle_reply,
.create_authorizer = ceph_x_create_authorizer,
.update_authorizer = ceph_x_update_authorizer,
.add_authorizer_challenge = ceph_x_add_authorizer_challenge,
.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
.sign_message = ceph_x_sign_message,
.check_message_signature = ceph_x_check_message_signature,
};
int ceph_x_init(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi;
int ret;
dout("ceph_x_init %p\n", ac);
ret = -ENOMEM;
xi = kzalloc(sizeof(*xi), GFP_NOFS);
if (!xi)
goto out;
ret = -EINVAL;
if (!ac->key) {
pr_err("no secret set (for auth_x protocol)\n");
goto out_nomem;
}
ret = ceph_crypto_key_clone(&xi->secret, ac->key);
if (ret < 0) {
pr_err("cannot clone key: %d\n", ret);
goto out_nomem;
}
xi->starting = true;
xi->ticket_handlers = RB_ROOT;
ac->protocol = CEPH_AUTH_CEPHX;
ac->private = xi;
ac->ops = &ceph_x_ops;
return 0;
out_nomem:
kfree(xi);
out:
return ret;
}
| linux-master | net/ceph/auth_x.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>
#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
/*
* Set ->key and ->tfm. The rest of the key should be filled in before
* this function is called.
*/
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
unsigned int noio_flag;
int ret;
key->key = NULL;
key->tfm = NULL;
switch (key->type) {
case CEPH_CRYPTO_NONE:
return 0; /* nothing to do */
case CEPH_CRYPTO_AES:
break;
default:
return -ENOTSUPP;
}
if (!key->len)
return -EINVAL;
key->key = kmemdup(buf, key->len, GFP_NOIO);
if (!key->key) {
ret = -ENOMEM;
goto fail;
}
/* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(key->tfm)) {
ret = PTR_ERR(key->tfm);
key->tfm = NULL;
goto fail;
}
ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
if (ret)
goto fail;
return 0;
fail:
ceph_crypto_key_destroy(key);
return ret;
}
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src)
{
memcpy(dst, src, sizeof(struct ceph_crypto_key));
return set_secret(dst, src->key);
}
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
sizeof(u16) + key->len > end)
return -ERANGE;
ceph_encode_16(p, key->type);
ceph_encode_copy(p, &key->created, sizeof(key->created));
ceph_encode_16(p, key->len);
ceph_encode_copy(p, key->key, key->len);
return 0;
}
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
int ret;
ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
key->type = ceph_decode_16(p);
ceph_decode_copy(p, &key->created, sizeof(key->created));
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
ret = set_secret(key, *p);
memzero_explicit(*p, key->len);
*p += key->len;
return ret;
bad:
dout("failed to decode crypto key\n");
return -EINVAL;
}
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
int inlen = strlen(inkey);
int blen = inlen * 3 / 4;
void *buf, *p;
int ret;
dout("crypto_key_unarmor %s\n", inkey);
buf = kmalloc(blen, GFP_NOFS);
if (!buf)
return -ENOMEM;
blen = ceph_unarmor(buf, inkey, inkey+inlen);
if (blen < 0) {
kfree(buf);
return blen;
}
p = buf;
ret = ceph_crypto_key_decode(key, &p, p + blen);
kfree(buf);
if (ret)
return ret;
dout("crypto_key_unarmor key %p type %d len %d\n", key,
key->type, key->len);
return 0;
}
void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
if (key) {
kfree_sensitive(key->key);
key->key = NULL;
if (key->tfm) {
crypto_free_sync_skcipher(key->tfm);
key->tfm = NULL;
}
}
}
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
/*
* Should be used for buffers allocated with kvmalloc().
* Currently these are encrypt out-buffer (ceph_buffer) and decrypt
* in-buffer (msg front).
*
* Dispose of @sgt with teardown_sgtable().
*
* @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
* in cases where a single sg is sufficient. No attempt to reduce the
* number of sgs by squeezing physically contiguous pages together is
* made though, for simplicity.
*/
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
const void *buf, unsigned int buf_len)
{
struct scatterlist *sg;
const bool is_vmalloc = is_vmalloc_addr(buf);
unsigned int off = offset_in_page(buf);
unsigned int chunk_cnt = 1;
unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
int i;
int ret;
if (buf_len == 0) {
memset(sgt, 0, sizeof(*sgt));
return -EINVAL;
}
if (is_vmalloc) {
chunk_cnt = chunk_len >> PAGE_SHIFT;
chunk_len = PAGE_SIZE;
}
if (chunk_cnt > 1) {
ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
if (ret)
return ret;
} else {
WARN_ON(chunk_cnt != 1);
sg_init_table(prealloc_sg, 1);
sgt->sgl = prealloc_sg;
sgt->nents = sgt->orig_nents = 1;
}
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
struct page *page;
unsigned int len = min(chunk_len - off, buf_len);
if (is_vmalloc)
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
sg_set_page(sg, page, len, off);
off = 0;
buf += len;
buf_len -= len;
}
WARN_ON(buf_len != 0);
return 0;
}
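/*
 * Illustrative example: a vmalloc'ed buffer of 10000 bytes that starts
 * 300 bytes into a page spans PAGE_ALIGN(300 + 10000) / PAGE_SIZE = 3
 * pages (with 4K pages), so chunk_cnt = 3 and the resulting sgs cover
 * 3796, 4096 and 2108 bytes respectively.
 */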
static void teardown_sgtable(struct sg_table *sgt)
{
if (sgt->orig_nents > 1)
sg_free_table(sgt);
}
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
char iv[AES_BLOCK_SIZE] __aligned(8);
int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
int crypt_len = encrypt ? in_len + pad_byte : in_len;
int ret;
WARN_ON(crypt_len > buf_len);
if (encrypt)
memset(buf + in_len, pad_byte, pad_byte);
ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
if (ret)
return ret;
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_sync_tfm(req, key->tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
/*
print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
key->key, key->len, 1);
print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
buf, crypt_len, 1);
*/
if (encrypt)
ret = crypto_skcipher_encrypt(req);
else
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret) {
pr_err("%s %scrypt failed: %d\n", __func__,
encrypt ? "en" : "de", ret);
goto out_sgt;
}
/*
print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
buf, crypt_len, 1);
*/
if (encrypt) {
*pout_len = crypt_len;
} else {
pad_byte = *(char *)(buf + in_len - 1);
if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
in_len >= pad_byte) {
*pout_len = in_len - pad_byte;
} else {
pr_err("%s got bad padding %d on in_len %d\n",
__func__, pad_byte, in_len);
ret = -EPERM;
goto out_sgt;
}
}
out_sgt:
teardown_sgtable(&sgt);
return ret;
}
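/*
 * Padding example: encrypting in_len = 22 gives pad_byte = 16 - (22 & 15)
 * = 10, so ten 0x0a bytes are appended and crypt_len = 32 (two AES
 * blocks).  Decryption reads the last byte back to strip the same amount.
 */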
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
*pout_len = in_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
pout_len);
default:
return -ENOTSUPP;
}
}
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
size_t datalen = prep->datalen;
int ret;
void *p;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto err;
ret = -ENOMEM;
ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
if (!ckey)
goto err;
/* TODO ceph_crypto_key_decode should really take const input */
p = (void *)prep->data;
ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
if (ret < 0)
goto err_ckey;
prep->payload.data[0] = ckey;
prep->quotalen = datalen;
return 0;
err_ckey:
kfree(ckey);
err:
return ret;
}
static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey = prep->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
static void ceph_key_destroy(struct key *key)
{
struct ceph_crypto_key *ckey = key->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
struct key_type key_type_ceph = {
.name = "ceph",
.preparse = ceph_key_preparse,
.free_preparse = ceph_key_free_preparse,
.instantiate = generic_key_instantiate,
.destroy = ceph_key_destroy,
};
int __init ceph_crypto_init(void)
{
return register_key_type(&key_type_ceph);
}
void ceph_crypto_shutdown(void)
{
unregister_key_type(&key_type_ceph);
}
| linux-master | net/ceph/crypto.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include "auth_none.h"
static void reset(struct ceph_auth_client *ac)
{
struct ceph_auth_none_info *xi = ac->private;
xi->starting = true;
}
static void destroy(struct ceph_auth_client *ac)
{
kfree(ac->private);
ac->private = NULL;
}
static int is_authenticated(struct ceph_auth_client *ac)
{
struct ceph_auth_none_info *xi = ac->private;
return !xi->starting;
}
static int should_authenticate(struct ceph_auth_client *ac)
{
struct ceph_auth_none_info *xi = ac->private;
return xi->starting;
}
static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
struct ceph_none_authorizer *au)
{
void *p = au->buf;
void *const end = p + sizeof(au->buf);
int ret;
ceph_encode_8_safe(&p, end, 1, e_range);
ret = ceph_auth_entity_name_encode(ac->name, &p, end);
if (ret < 0)
return ret;
ceph_encode_64_safe(&p, end, ac->global_id, e_range);
au->buf_len = p - (void *)au->buf;
dout("%s built authorizer len %d\n", __func__, au->buf_len);
return 0;
e_range:
return -ERANGE;
}
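/*
 * Layout note (inferred from the encoding above): the "none" authorizer
 * is simply
 *
 *   u8 struct_v (1) | encoded entity_name | __le64 global_id
 *
 * and carries no cryptographic material at all.
 */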
static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
{
return 0;
}
/*
 * the generic auth code decodes the global_id, and we carry no actual
 * authentication state, so nothing else happens here.
*/
static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len)
{
struct ceph_auth_none_info *xi = ac->private;
xi->starting = false;
ceph_auth_set_global_id(ac, global_id);
return 0;
}
static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
{
kfree(a);
}
/*
 * Build an 'authorizer' with our entity_name and global_id.  It is
 * identical for all services we connect to.
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
struct ceph_none_authorizer *au;
int ret;
au = kmalloc(sizeof(*au), GFP_NOFS);
if (!au)
return -ENOMEM;
au->base.destroy = ceph_auth_none_destroy_authorizer;
ret = ceph_auth_none_build_authorizer(ac, au);
if (ret) {
kfree(au);
return ret;
}
auth->authorizer = (struct ceph_authorizer *) au;
auth->authorizer_buf = au->buf;
auth->authorizer_buf_len = au->buf_len;
auth->authorizer_reply_buf = NULL;
auth->authorizer_reply_buf_len = 0;
return 0;
}
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
.reset = reset,
.destroy = destroy,
.is_authenticated = is_authenticated,
.should_authenticate = should_authenticate,
.build_request = build_request,
.handle_reply = handle_reply,
.create_authorizer = ceph_auth_none_create_authorizer,
};
int ceph_auth_none_init(struct ceph_auth_client *ac)
{
struct ceph_auth_none_info *xi;
dout("ceph_auth_none_init %p\n", ac);
xi = kzalloc(sizeof(*xi), GFP_NOFS);
if (!xi)
return -ENOMEM;
xi->starting = true;
ac->protocol = CEPH_AUTH_NONE;
ac->private = xi;
ac->ops = &ceph_auth_none_ops;
return 0;
}
| linux-master | net/ceph/auth_none.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/crush/crush.h>
#else
# include "crush_compat.h"
# include "crush.h"
#endif
const char *crush_bucket_alg_name(int alg)
{
switch (alg) {
case CRUSH_BUCKET_UNIFORM: return "uniform";
case CRUSH_BUCKET_LIST: return "list";
case CRUSH_BUCKET_TREE: return "tree";
case CRUSH_BUCKET_STRAW: return "straw";
case CRUSH_BUCKET_STRAW2: return "straw2";
default: return "unknown";
}
}
/**
* crush_get_bucket_item_weight - Get weight of an item in given bucket
* @b: bucket pointer
* @p: item index in bucket
*/
int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
{
if ((__u32)p >= b->size)
return 0;
switch (b->alg) {
case CRUSH_BUCKET_UNIFORM:
return ((struct crush_bucket_uniform *)b)->item_weight;
case CRUSH_BUCKET_LIST:
return ((struct crush_bucket_list *)b)->item_weights[p];
case CRUSH_BUCKET_TREE:
return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
case CRUSH_BUCKET_STRAW2:
return ((struct crush_bucket_straw2 *)b)->item_weights[p];
}
return 0;
}
void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
{
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_list(struct crush_bucket_list *b)
{
kfree(b->item_weights);
kfree(b->sum_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
{
kfree(b->h.items);
kfree(b->node_weights);
kfree(b);
}
void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
{
kfree(b->straws);
kfree(b->item_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b)
{
kfree(b->item_weights);
kfree(b->h.items);
kfree(b);
}
void crush_destroy_bucket(struct crush_bucket *b)
{
switch (b->alg) {
case CRUSH_BUCKET_UNIFORM:
crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b);
break;
case CRUSH_BUCKET_LIST:
crush_destroy_bucket_list((struct crush_bucket_list *)b);
break;
case CRUSH_BUCKET_TREE:
crush_destroy_bucket_tree((struct crush_bucket_tree *)b);
break;
case CRUSH_BUCKET_STRAW:
crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
break;
case CRUSH_BUCKET_STRAW2:
crush_destroy_bucket_straw2((struct crush_bucket_straw2 *)b);
break;
}
}
/**
* crush_destroy - Destroy a crush_map
* @map: crush_map pointer
*/
void crush_destroy(struct crush_map *map)
{
/* buckets */
if (map->buckets) {
__s32 b;
for (b = 0; b < map->max_buckets; b++) {
if (map->buckets[b] == NULL)
continue;
crush_destroy_bucket(map->buckets[b]);
}
kfree(map->buckets);
}
/* rules */
if (map->rules) {
__u32 b;
for (b = 0; b < map->max_rules; b++)
crush_destroy_rule(map->rules[b]);
kfree(map->rules);
}
#ifndef __KERNEL__
kfree(map->choose_tries);
#else
clear_crush_names(&map->type_names);
clear_crush_names(&map->names);
clear_choose_args(map);
#endif
kfree(map);
}
void crush_destroy_rule(struct crush_rule *rule)
{
kfree(rule);
}
| linux-master | net/ceph/crush/crush.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef __KERNEL__
# include <linux/crush/hash.h>
#else
# include "hash.h"
#endif
/*
* Robert Jenkins' function for mixing 32-bit values
* https://burtleburtle.net/bob/hash/evahash.html
* a, b = random bits, c = input and output
*/
#define crush_hashmix(a, b, c) do { \
a = a-b; a = a-c; a = a^(c>>13); \
b = b-c; b = b-a; b = b^(a<<8); \
c = c-a; c = c-b; c = c^(b>>13); \
a = a-b; a = a-c; a = a^(c>>12); \
b = b-c; b = b-a; b = b^(a<<16); \
c = c-a; c = c-b; c = c^(b>>5); \
a = a-b; a = a-c; a = a^(c>>3); \
b = b-c; b = b-a; b = b^(a<<10); \
c = c-a; c = c-b; c = c^(b>>15); \
} while (0)
#define crush_hash_seed 1315423911
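/* Each crush_hash32_rjenkins1*() variant below seeds the state with
 * crush_hash_seed XORed with all of its inputs and then runs a few
 * crush_hashmix() rounds, padding with the arbitrary constants 231232
 * and 1232, so equal inputs always map to the same 32-bit value.
 */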
static __u32 crush_hash32_rjenkins1(__u32 a)
{
__u32 hash = crush_hash_seed ^ a;
__u32 b = a;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(b, x, hash);
crush_hashmix(y, a, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
{
__u32 hash = crush_hash_seed ^ a ^ b;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(x, a, hash);
crush_hashmix(b, y, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(a, x, hash);
crush_hashmix(y, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, d, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
__u32 e)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(e, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
crush_hashmix(d, x, hash);
crush_hashmix(y, e, hash);
return hash;
}
__u32 crush_hash32(int type, __u32 a)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1(a);
default:
return 0;
}
}
__u32 crush_hash32_2(int type, __u32 a, __u32 b)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_2(a, b);
default:
return 0;
}
}
__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_3(a, b, c);
default:
return 0;
}
}
__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_4(a, b, c, d);
default:
return 0;
}
}
__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_5(a, b, c, d, e);
default:
return 0;
}
}
const char *crush_hash_name(int type)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return "rjenkins1";
default:
return "unknown";
}
}
| linux-master | net/ceph/crush/hash.c |
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Intel Corporation All Rights Reserved
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifdef __KERNEL__
# include <linux/string.h>
# include <linux/slab.h>
# include <linux/bug.h>
# include <linux/kernel.h>
# include <linux/crush/crush.h>
# include <linux/crush/hash.h>
# include <linux/crush/mapper.h>
#else
# include "crush_compat.h"
# include "crush.h"
# include "hash.h"
# include "mapper.h"
#endif
#include "crush_ln_table.h"
#define dprintk(args...) /* printf(args) */
/*
* Implement the core CRUSH mapping algorithm.
*/
/**
* crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
* @map: the crush_map
* @ruleset: the storage ruleset id (user defined)
* @type: storage ruleset type (user defined)
* @size: output set size
*/
int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
{
__u32 i;
for (i = 0; i < map->max_rules; i++) {
if (map->rules[i] &&
map->rules[i]->mask.ruleset == ruleset &&
map->rules[i]->mask.type == type &&
map->rules[i]->mask.min_size <= size &&
map->rules[i]->mask.max_size >= size)
return i;
}
return -1;
}
/*
* bucket choose methods
*
* For each bucket algorithm, we have a "choose" method that, given a
* crush input @x and replica position (usually, position in output set) @r,
* will produce an item in the bucket.
*/
/*
* Choose based on a random permutation of the bucket.
*
* We used to use some prime number arithmetic to do this, but it
* wasn't very random, and had some other bad behaviors. Instead, we
* calculate an actual random permutation of the bucket members.
* Since this is expensive, we optimize for the r=0 case, which
* captures the vast majority of calls.
*/
static int bucket_perm_choose(const struct crush_bucket *bucket,
struct crush_work_bucket *work,
int x, int r)
{
unsigned int pr = r % bucket->size;
unsigned int i, s;
/* start a new permutation if @x has changed */
if (work->perm_x != (__u32)x || work->perm_n == 0) {
dprintk("bucket %d new x=%d\n", bucket->id, x);
work->perm_x = x;
/* optimize common r=0 case */
if (pr == 0) {
s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
bucket->size;
work->perm[0] = s;
work->perm_n = 0xffff; /* magic value, see below */
goto out;
}
for (i = 0; i < bucket->size; i++)
work->perm[i] = i;
work->perm_n = 0;
} else if (work->perm_n == 0xffff) {
/* clean up after the r=0 case above */
for (i = 1; i < bucket->size; i++)
work->perm[i] = i;
work->perm[work->perm[0]] = 0;
work->perm_n = 1;
}
/* calculate permutation up to pr */
for (i = 0; i < work->perm_n; i++)
dprintk(" perm_choose have %d: %d\n", i, work->perm[i]);
while (work->perm_n <= pr) {
unsigned int p = work->perm_n;
/* no point in swapping the final entry */
if (p < bucket->size - 1) {
i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
(bucket->size - p);
if (i) {
unsigned int t = work->perm[p + i];
work->perm[p + i] = work->perm[p];
work->perm[p] = t;
}
dprintk(" perm_choose swap %d with %d\n", p, p+i);
}
work->perm_n++;
}
for (i = 0; i < bucket->size; i++)
dprintk(" perm_choose %d: %d\n", i, work->perm[i]);
s = work->perm[pr];
out:
dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
bucket->size, x, r, pr, s);
return bucket->items[s];
}
/* uniform */
static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket,
struct crush_work_bucket *work, int x, int r)
{
return bucket_perm_choose(&bucket->h, work, x, r);
}
/* list */
static int bucket_list_choose(const struct crush_bucket_list *bucket,
int x, int r)
{
int i;
for (i = bucket->h.size-1; i >= 0; i--) {
__u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
r, bucket->h.id);
w &= 0xffff;
dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
"sw %x rand %llx",
i, x, r, bucket->h.items[i], bucket->item_weights[i],
bucket->sum_weights[i], w);
w *= bucket->sum_weights[i];
w = w >> 16;
/*dprintk(" scaled %llx\n", w);*/
if (w < bucket->item_weights[i]) {
return bucket->h.items[i];
}
}
dprintk("bad list sums for bucket %d\n", bucket->h.id);
return bucket->h.items[0];
}
/* (binary) tree */
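/* The tree bucket stores its nodes in an implicit binary tree: leaves
 * live at the odd indices (2*i + 1 for item i) and a node's height is
 * the number of trailing zero bits in its index, which is what
 * height(), left(), right() and terminal() below rely on.
 */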
static int height(int n)
{
int h = 0;
while ((n & 1) == 0) {
h++;
n = n >> 1;
}
return h;
}
static int left(int x)
{
int h = height(x);
return x - (1 << (h-1));
}
static int right(int x)
{
int h = height(x);
return x + (1 << (h-1));
}
static int terminal(int x)
{
return x & 1;
}
static int bucket_tree_choose(const struct crush_bucket_tree *bucket,
int x, int r)
{
int n;
__u32 w;
__u64 t;
/* start at root */
n = bucket->num_nodes >> 1;
while (!terminal(n)) {
int l;
/* pick point in [0, w) */
w = bucket->node_weights[n];
t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
bucket->h.id) * (__u64)w;
t = t >> 32;
/* descend to the left or right? */
l = left(n);
if (t < bucket->node_weights[l])
n = l;
else
n = right(n);
}
return bucket->h.items[n >> 1];
}
/* straw */
static int bucket_straw_choose(const struct crush_bucket_straw *bucket,
int x, int r)
{
__u32 i;
int high = 0;
__u64 high_draw = 0;
__u64 draw;
for (i = 0; i < bucket->h.size; i++) {
draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
draw &= 0xffff;
draw *= bucket->straws[i];
if (i == 0 || draw > high_draw) {
high = i;
high_draw = draw;
}
}
return bucket->h.items[high];
}
/* compute 2^44*log2(input+1) */
static __u64 crush_ln(unsigned int xin)
{
unsigned int x = xin;
int iexpon, index1, index2;
__u64 RH, LH, LL, xl64, result;
x++;
/* normalize input */
iexpon = 15;
/*
* figure out number of bits we need to shift and
* do it in one step instead of iteratively
*/
if (!(x & 0x18000)) {
int bits = __builtin_clz(x & 0x1FFFF) - 16;
x <<= bits;
iexpon = 15 - bits;
}
index1 = (x >> 8) << 1;
/* RH ~ 2^56/index1 */
RH = __RH_LH_tbl[index1 - 256];
/* LH ~ 2^48 * log2(index1/256) */
LH = __RH_LH_tbl[index1 + 1 - 256];
/* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
xl64 = (__s64)x * RH;
xl64 >>= 48;
result = iexpon;
result <<= (12 + 32);
index2 = xl64 & 0xff;
/* LL ~ 2^48*log2(1.0+index2/2^15) */
LL = __LL_tbl[index2];
LH = LH + LL;
LH >>= (48 - 12 - 32);
result += LH;
return result;
}
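/* Example (approximate, derived from the comment above): for the largest
 * 16-bit hash value u = 0xffff, x becomes 0x10000 and
 * crush_ln(u) ~= 16 << 44 == 0x1000000000000, which is the bias
 * subtracted in bucket_straw2_choose() so that every draw is <= 0.
 */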
/*
* straw2
*
* for reference, see:
*
* https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
*
*/
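/* Illustrative sketch (floating point, not kernel code): the draw for
 * item i is essentially
 *
 *	draw_i = ln(u_i) / w_i,  u_i uniform in (0, 1], w_i = item weight
 *
 * and the item with the largest (least negative) draw wins.  Since
 * -ln(u_i) / w_i is an exponential random variable with rate w_i, the
 * winner is the minimum of independent exponentials, so item i is
 * selected with probability w_i / sum(w), i.e. proportional to weight.
 */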
static __u32 *get_choose_arg_weights(const struct crush_bucket_straw2 *bucket,
const struct crush_choose_arg *arg,
int position)
{
if (!arg || !arg->weight_set)
return bucket->item_weights;
if (position >= arg->weight_set_size)
position = arg->weight_set_size - 1;
return arg->weight_set[position].weights;
}
static __s32 *get_choose_arg_ids(const struct crush_bucket_straw2 *bucket,
const struct crush_choose_arg *arg)
{
if (!arg || !arg->ids)
return bucket->h.items;
return arg->ids;
}
static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket,
int x, int r,
const struct crush_choose_arg *arg,
int position)
{
unsigned int i, high = 0;
unsigned int u;
__s64 ln, draw, high_draw = 0;
__u32 *weights = get_choose_arg_weights(bucket, arg, position);
__s32 *ids = get_choose_arg_ids(bucket, arg);
for (i = 0; i < bucket->h.size; i++) {
dprintk("weight 0x%x item %d\n", weights[i], ids[i]);
if (weights[i]) {
u = crush_hash32_3(bucket->h.hash, x, ids[i], r);
u &= 0xffff;
/*
* for some reason slightly less than 0x10000 produces
* a slightly more accurate distribution... probably a
* rounding effect.
*
* the natural log lookup table maps [0,0xffff]
			 * (corresponding to real numbers [1/0x10000, 1]) to
* [0, 0xffffffffffff] (corresponding to real numbers
* [-11.090355,0]).
*/
ln = crush_ln(u) - 0x1000000000000ll;
/*
* divide by 16.16 fixed-point weight. note
* that the ln value is negative, so a larger
* weight means a larger (less negative) value
* for draw.
*/
draw = div64_s64(ln, weights[i]);
} else {
draw = S64_MIN;
}
if (i == 0 || draw > high_draw) {
high = i;
high_draw = draw;
}
}
return bucket->h.items[high];
}
static int crush_bucket_choose(const struct crush_bucket *in,
struct crush_work_bucket *work,
int x, int r,
const struct crush_choose_arg *arg,
int position)
{
dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
BUG_ON(in->size == 0);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose(
(const struct crush_bucket_uniform *)in,
work, x, r);
case CRUSH_BUCKET_LIST:
return bucket_list_choose((const struct crush_bucket_list *)in,
x, r);
case CRUSH_BUCKET_TREE:
return bucket_tree_choose((const struct crush_bucket_tree *)in,
x, r);
case CRUSH_BUCKET_STRAW:
return bucket_straw_choose(
(const struct crush_bucket_straw *)in,
x, r);
case CRUSH_BUCKET_STRAW2:
return bucket_straw2_choose(
(const struct crush_bucket_straw2 *)in,
x, r, arg, position);
default:
dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
return in->items[0];
}
}
/*
 * true if the device is marked "out" of the cluster
 * (failed or fully offloaded)
*/
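/* Weights are 16.16 fixed point: 0x10000 means fully "in", 0 means
 * fully "out", and anything in between rejects the device
 * probabilistically by comparing a 16-bit hash against the weight.
 */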
static int is_out(const struct crush_map *map,
const __u32 *weight, int weight_max,
int item, int x)
{
if (item >= weight_max)
return 1;
if (weight[item] >= 0x10000)
return 0;
if (weight[item] == 0)
return 1;
if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
< weight[item])
return 0;
return 1;
}
/**
 * crush_choose_firstn - choose numrep distinct items of given type
 * @map: the crush_map
 * @work: intermediate working space, set up by crush_init_workspace()
 * @bucket: the bucket we are choosing an item from
 * @weight: weight vector (for map leaves)
 * @weight_max: size of weight vector
 * @x: crush input value
 * @numrep: the number of items to choose
 * @type: the type of item to choose
 * @out: pointer to output vector
 * @outpos: our position in that vector
 * @out_size: size of the out vector
 * @tries: number of attempts to make
 * @recurse_tries: number of attempts to have recursive chooseleaf make
 * @local_retries: localized retries
 * @local_fallback_retries: localized fallback retries
 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
 * @vary_r: pass r to recursive calls
 * @stable: stable mode starts rep=0 in the recursive call for all replicas
 * @out2: second output vector for leaf items (if @recurse_to_leaf)
 * @parent_r: r value passed from the parent
 * @choose_args: weights and ids for each known bucket
 */
static int crush_choose_firstn(const struct crush_map *map,
struct crush_work *work,
const struct crush_bucket *bucket,
const __u32 *weight, int weight_max,
int x, int numrep, int type,
int *out, int outpos,
int out_size,
unsigned int tries,
unsigned int recurse_tries,
unsigned int local_retries,
unsigned int local_fallback_retries,
int recurse_to_leaf,
unsigned int vary_r,
unsigned int stable,
int *out2,
int parent_r,
const struct crush_choose_arg *choose_args)
{
int rep;
unsigned int ftotal, flocal;
int retry_descent, retry_bucket, skip_rep;
const struct crush_bucket *in = bucket;
int r;
int i;
int item = 0;
int itemtype;
int collide, reject;
int count = out_size;
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep,
tries, recurse_tries, local_retries, local_fallback_retries,
parent_r, stable);
for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
/* keep trying until we get a non-out, non-colliding item */
ftotal = 0;
skip_rep = 0;
do {
retry_descent = 0;
in = bucket; /* initial bucket */
/* choose through intervening buckets */
flocal = 0;
do {
collide = 0;
retry_bucket = 0;
r = rep + parent_r;
/* r' = r + f_total */
r += ftotal;
/* bucket choose */
if (in->size == 0) {
reject = 1;
goto reject;
}
if (local_fallback_retries > 0 &&
flocal >= (in->size>>1) &&
flocal > local_fallback_retries)
item = bucket_perm_choose(
in, work->work[-1-in->id],
x, r);
else
item = crush_bucket_choose(
in, work->work[-1-in->id],
x, r,
(choose_args ?
&choose_args[-1-in->id] : NULL),
outpos);
if (item >= map->max_devices) {
dprintk(" bad item %d\n", item);
skip_rep = 1;
break;
}
/* desired type? */
if (item < 0)
itemtype = map->buckets[-1-item]->type;
else
itemtype = 0;
dprintk(" item %d type %d\n", item, itemtype);
/* keep going? */
if (itemtype != type) {
if (item >= 0 ||
(-1-item) >= map->max_buckets) {
dprintk(" bad item type %d\n", type);
skip_rep = 1;
break;
}
in = map->buckets[-1-item];
retry_bucket = 1;
continue;
}
/* collision? */
for (i = 0; i < outpos; i++) {
if (out[i] == item) {
collide = 1;
break;
}
}
reject = 0;
if (!collide && recurse_to_leaf) {
if (item < 0) {
int sub_r;
if (vary_r)
sub_r = r >> (vary_r-1);
else
sub_r = 0;
if (crush_choose_firstn(
map,
work,
map->buckets[-1-item],
weight, weight_max,
x, stable ? 1 : outpos+1, 0,
out2, outpos, count,
recurse_tries, 0,
local_retries,
local_fallback_retries,
0,
vary_r,
stable,
NULL,
sub_r,
choose_args) <= outpos)
/* didn't get leaf */
reject = 1;
} else {
/* we already have a leaf! */
out2[outpos] = item;
}
}
if (!reject && !collide) {
/* out? */
if (itemtype == 0)
reject = is_out(map, weight,
weight_max,
item, x);
}
reject:
if (reject || collide) {
ftotal++;
flocal++;
if (collide && flocal <= local_retries)
/* retry locally a few times */
retry_bucket = 1;
else if (local_fallback_retries > 0 &&
flocal <= in->size + local_fallback_retries)
/* exhaustive bucket search */
retry_bucket = 1;
else if (ftotal < tries)
/* then retry descent */
retry_descent = 1;
else
/* else give up */
skip_rep = 1;
dprintk(" reject %d collide %d "
"ftotal %u flocal %u\n",
reject, collide, ftotal,
flocal);
}
} while (retry_bucket);
} while (retry_descent);
if (skip_rep) {
dprintk("skip rep\n");
continue;
}
dprintk("CHOOSE got %d\n", item);
out[outpos] = item;
outpos++;
count--;
#ifndef __KERNEL__
if (map->choose_tries && ftotal <= map->choose_total_tries)
map->choose_tries[ftotal]++;
#endif
}
dprintk("CHOOSE returns %d\n", outpos);
return outpos;
}
/**
 * crush_choose_indep - alternative breadth-first positionally stable mapping
 */
static void crush_choose_indep(const struct crush_map *map,
struct crush_work *work,
const struct crush_bucket *bucket,
const __u32 *weight, int weight_max,
int x, int left, int numrep, int type,
int *out, int outpos,
unsigned int tries,
unsigned int recurse_tries,
int recurse_to_leaf,
int *out2,
int parent_r,
const struct crush_choose_arg *choose_args)
{
const struct crush_bucket *in = bucket;
int endpos = outpos + left;
int rep;
unsigned int ftotal;
int r;
int i;
int item = 0;
int itemtype;
int collide;
dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
/* initially my result is undefined */
for (rep = outpos; rep < endpos; rep++) {
out[rep] = CRUSH_ITEM_UNDEF;
if (out2)
out2[rep] = CRUSH_ITEM_UNDEF;
}
for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
#ifdef DEBUG_INDEP
if (out2 && ftotal) {
dprintk("%u %d a: ", ftotal, left);
for (rep = outpos; rep < endpos; rep++) {
dprintk(" %d", out[rep]);
}
dprintk("\n");
dprintk("%u %d b: ", ftotal, left);
for (rep = outpos; rep < endpos; rep++) {
dprintk(" %d", out2[rep]);
}
dprintk("\n");
}
#endif
for (rep = outpos; rep < endpos; rep++) {
if (out[rep] != CRUSH_ITEM_UNDEF)
continue;
in = bucket; /* initial bucket */
/* choose through intervening buckets */
for (;;) {
/* note: we base the choice on the position
* even in the nested call. that means that
* if the first layer chooses the same bucket
* in a different position, we will tend to
* choose a different item in that bucket.
* this will involve more devices in data
* movement and tend to distribute the load.
*/
r = rep + parent_r;
/* be careful */
if (in->alg == CRUSH_BUCKET_UNIFORM &&
in->size % numrep == 0)
/* r'=r+(n+1)*f_total */
r += (numrep+1) * ftotal;
else
/* r' = r + n*f_total */
r += numrep * ftotal;
/* bucket choose */
if (in->size == 0) {
dprintk(" empty bucket\n");
break;
}
item = crush_bucket_choose(
in, work->work[-1-in->id],
x, r,
(choose_args ?
&choose_args[-1-in->id] : NULL),
outpos);
if (item >= map->max_devices) {
dprintk(" bad item %d\n", item);
out[rep] = CRUSH_ITEM_NONE;
if (out2)
out2[rep] = CRUSH_ITEM_NONE;
left--;
break;
}
/* desired type? */
if (item < 0)
itemtype = map->buckets[-1-item]->type;
else
itemtype = 0;
dprintk(" item %d type %d\n", item, itemtype);
/* keep going? */
if (itemtype != type) {
if (item >= 0 ||
(-1-item) >= map->max_buckets) {
dprintk(" bad item type %d\n", type);
out[rep] = CRUSH_ITEM_NONE;
if (out2)
out2[rep] =
CRUSH_ITEM_NONE;
left--;
break;
}
in = map->buckets[-1-item];
continue;
}
/* collision? */
collide = 0;
for (i = outpos; i < endpos; i++) {
if (out[i] == item) {
collide = 1;
break;
}
}
if (collide)
break;
if (recurse_to_leaf) {
if (item < 0) {
crush_choose_indep(
map,
work,
map->buckets[-1-item],
weight, weight_max,
x, 1, numrep, 0,
out2, rep,
recurse_tries, 0,
0, NULL, r,
choose_args);
if (out2[rep] == CRUSH_ITEM_NONE) {
/* placed nothing; no leaf */
break;
}
} else {
/* we already have a leaf! */
out2[rep] = item;
}
}
/* out? */
if (itemtype == 0 &&
is_out(map, weight, weight_max, item, x))
break;
/* yay! */
out[rep] = item;
left--;
break;
}
}
}
for (rep = outpos; rep < endpos; rep++) {
if (out[rep] == CRUSH_ITEM_UNDEF) {
out[rep] = CRUSH_ITEM_NONE;
}
if (out2 && out2[rep] == CRUSH_ITEM_UNDEF) {
out2[rep] = CRUSH_ITEM_NONE;
}
}
#ifndef __KERNEL__
if (map->choose_tries && ftotal <= map->choose_total_tries)
map->choose_tries[ftotal]++;
#endif
#ifdef DEBUG_INDEP
if (out2) {
dprintk("%u %d a: ", ftotal, left);
for (rep = outpos; rep < endpos; rep++) {
dprintk(" %d", out[rep]);
}
dprintk("\n");
dprintk("%u %d b: ", ftotal, left);
for (rep = outpos; rep < endpos; rep++) {
dprintk(" %d", out2[rep]);
}
dprintk("\n");
}
#endif
}
/*
* This takes a chunk of memory and sets it up to be a shiny new
* working area for a CRUSH placement computation. It must be called
* on any newly allocated memory before passing it in to
* crush_do_rule. It may be used repeatedly after that, so long as the
* map has not changed. If the map /has/ changed, you must make sure
* the working size is no smaller than what was allocated and re-run
* crush_init_workspace.
*
* If you do retain the working space between calls to crush, make it
* thread-local.
*/
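/* Layout produced below (everything carved out of the one allocation):
 *
 *	struct crush_work
 *	struct crush_work_bucket *work[max_buckets]
 *	for each existing bucket b:
 *		struct crush_work_bucket
 *		__u32 perm[buckets[b]->size]
 *
 * which must add up to exactly map->working_size (see the BUG_ON).
 */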
void crush_init_workspace(const struct crush_map *map, void *v)
{
struct crush_work *w = v;
__s32 b;
/*
* We work by moving through the available space and setting
* values and pointers as we go.
*
* It's a bit like Forth's use of the 'allot' word since we
* set the pointer first and then reserve the space for it to
	 * point to by incrementing the pointer.
*/
v += sizeof(struct crush_work);
w->work = v;
v += map->max_buckets * sizeof(struct crush_work_bucket *);
for (b = 0; b < map->max_buckets; ++b) {
if (!map->buckets[b])
continue;
w->work[b] = v;
switch (map->buckets[b]->alg) {
default:
v += sizeof(struct crush_work_bucket);
break;
}
w->work[b]->perm_x = 0;
w->work[b]->perm_n = 0;
w->work[b]->perm = v;
v += map->buckets[b]->size * sizeof(__u32);
}
BUG_ON(v - (void *)w != map->working_size);
}
/**
* crush_do_rule - calculate a mapping with the given input and rule
* @map: the crush_map
* @ruleno: the rule id
* @x: hash input
* @result: pointer to result vector
* @result_max: maximum result size
* @weight: weight vector (for map leaves)
* @weight_max: size of weight vector
* @cwin: pointer to at least crush_work_size() bytes of memory
* @choose_args: weights and ids for each known bucket
*/
int crush_do_rule(const struct crush_map *map,
int ruleno, int x, int *result, int result_max,
const __u32 *weight, int weight_max,
void *cwin, const struct crush_choose_arg *choose_args)
{
int result_len;
struct crush_work *cw = cwin;
int *a = cwin + map->working_size;
int *b = a + result_max;
int *c = b + result_max;
int *w = a;
int *o = b;
int recurse_to_leaf;
int wsize = 0;
int osize;
const struct crush_rule *rule;
__u32 step;
int i, j;
int numrep;
int out_size;
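	/* cwin holds the crush_work area followed by three scratch vectors
	 * of result_max ints: a and b ping-pong as the working set (w) and
	 * output set (o) of each step, while c collects chooseleaf results
	 * before they are copied into the output set.
	 */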
/*
* the original choose_total_tries value was off by one (it
* counted "retries" and not "tries"). add one.
*/
int choose_tries = map->choose_total_tries + 1;
int choose_leaf_tries = 0;
/*
* the local tries values were counted as "retries", though,
* and need no adjustment
*/
int choose_local_retries = map->choose_local_tries;
int choose_local_fallback_retries = map->choose_local_fallback_tries;
int vary_r = map->chooseleaf_vary_r;
int stable = map->chooseleaf_stable;
if ((__u32)ruleno >= map->max_rules) {
dprintk(" bad ruleno %d\n", ruleno);
return 0;
}
rule = map->rules[ruleno];
result_len = 0;
for (step = 0; step < rule->len; step++) {
int firstn = 0;
const struct crush_rule_step *curstep = &rule->steps[step];
switch (curstep->op) {
case CRUSH_RULE_TAKE:
if ((curstep->arg1 >= 0 &&
curstep->arg1 < map->max_devices) ||
(-1-curstep->arg1 >= 0 &&
-1-curstep->arg1 < map->max_buckets &&
map->buckets[-1-curstep->arg1])) {
w[0] = curstep->arg1;
wsize = 1;
} else {
dprintk(" bad take value %d\n", curstep->arg1);
}
break;
case CRUSH_RULE_SET_CHOOSE_TRIES:
if (curstep->arg1 > 0)
choose_tries = curstep->arg1;
break;
case CRUSH_RULE_SET_CHOOSELEAF_TRIES:
if (curstep->arg1 > 0)
choose_leaf_tries = curstep->arg1;
break;
case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES:
if (curstep->arg1 >= 0)
choose_local_retries = curstep->arg1;
break;
case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES:
if (curstep->arg1 >= 0)
choose_local_fallback_retries = curstep->arg1;
break;
case CRUSH_RULE_SET_CHOOSELEAF_VARY_R:
if (curstep->arg1 >= 0)
vary_r = curstep->arg1;
break;
case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
if (curstep->arg1 >= 0)
stable = curstep->arg1;
break;
case CRUSH_RULE_CHOOSELEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
fallthrough;
case CRUSH_RULE_CHOOSELEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
if (wsize == 0)
break;
recurse_to_leaf =
curstep->op ==
CRUSH_RULE_CHOOSELEAF_FIRSTN ||
curstep->op ==
CRUSH_RULE_CHOOSELEAF_INDEP;
/* reset output */
osize = 0;
for (i = 0; i < wsize; i++) {
int bno;
numrep = curstep->arg1;
if (numrep <= 0) {
numrep += result_max;
if (numrep <= 0)
continue;
}
j = 0;
/* make sure bucket id is valid */
bno = -1 - w[i];
if (bno < 0 || bno >= map->max_buckets) {
/* w[i] is probably CRUSH_ITEM_NONE */
dprintk(" bad w[i] %d\n", w[i]);
continue;
}
if (firstn) {
int recurse_tries;
if (choose_leaf_tries)
recurse_tries =
choose_leaf_tries;
else if (map->chooseleaf_descend_once)
recurse_tries = 1;
else
recurse_tries = choose_tries;
osize += crush_choose_firstn(
map,
cw,
map->buckets[bno],
weight, weight_max,
x, numrep,
curstep->arg2,
o+osize, j,
result_max-osize,
choose_tries,
recurse_tries,
choose_local_retries,
choose_local_fallback_retries,
recurse_to_leaf,
vary_r,
stable,
c+osize,
0,
choose_args);
} else {
out_size = ((numrep < (result_max-osize)) ?
numrep : (result_max-osize));
crush_choose_indep(
map,
cw,
map->buckets[bno],
weight, weight_max,
x, out_size, numrep,
curstep->arg2,
o+osize, j,
choose_tries,
choose_leaf_tries ?
choose_leaf_tries : 1,
recurse_to_leaf,
c+osize,
0,
choose_args);
osize += out_size;
}
}
if (recurse_to_leaf)
/* copy final _leaf_ values to output set */
memcpy(o, c, osize*sizeof(*o));
/* swap o and w arrays */
swap(o, w);
wsize = osize;
break;
case CRUSH_RULE_EMIT:
for (i = 0; i < wsize && result_len < result_max; i++) {
result[result_len] = w[i];
result_len++;
}
wsize = 0;
break;
default:
dprintk(" unknown op %d at step %d\n",
curstep->op, step);
break;
}
}
return result_len;
}
| linux-master | net/ceph/crush/mapper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Fragment Header compression according to RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_FRAGMENT_ID_0 0xe4
#define LOWPAN_NHC_FRAGMENT_MASK_0 0xfe
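/* LOWPAN_NHC() (see nhc.h for the exact argument meanings) registers
 * this compression format with the 6lowpan core: a name, the IPv6
 * next-header value it matches, a header length (0 here), and the first
 * NHC ID octet with its mask.  The two trailing NULLs leave the
 * (un)compression callbacks unset, so only generic NHC handling applies.
 */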
LOWPAN_NHC(nhc_fragment, "RFC6282 Fragment", NEXTHDR_FRAGMENT, 0,
LOWPAN_NHC_FRAGMENT_ID_0, LOWPAN_NHC_FRAGMENT_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_fragment);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Fragment compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_fragment.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN Extension Header compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_ROUTE_ID_0 0xb2
#define LOWPAN_GHC_EXT_ROUTE_MASK_0 0xfe
LOWPAN_NHC(ghc_ext_route, "RFC7400 Routing Extension Header", NEXTHDR_ROUTING,
0, LOWPAN_GHC_EXT_ROUTE_ID_0, LOWPAN_GHC_EXT_ROUTE_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_route);
MODULE_DESCRIPTION("6LoWPAN generic header routing extension compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_ext_route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Routing Header compression according to RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_ROUTING_ID_0 0xe2
#define LOWPAN_NHC_ROUTING_MASK_0 0xfe
LOWPAN_NHC(nhc_routing, "RFC6282 Routing", NEXTHDR_ROUTING, 0,
LOWPAN_NHC_ROUTING_ID_0, LOWPAN_NHC_ROUTING_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_routing);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Routing compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_routing.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Mobility Header compression according to RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_MOBILITY_ID_0 0xe8
#define LOWPAN_NHC_MOBILITY_MASK_0 0xfe
LOWPAN_NHC(nhc_mobility, "RFC6282 Mobility", NEXTHDR_MOBILITY, 0,
LOWPAN_NHC_MOBILITY_ID_0, LOWPAN_NHC_MOBILITY_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_mobility);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Mobility compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_mobility.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* (C) 2015 Pengutronix, Alexander Aring <[email protected]>
* Copyright (c) 2015 Nordic Semiconductor. All Rights Reserved.
*/
#include <net/6lowpan.h>
#include "6lowpan_i.h"
#define LOWPAN_DEBUGFS_CTX_PFX_NUM_ARGS 8
static struct dentry *lowpan_debugfs;
static int lowpan_ctx_flag_active_set(void *data, u64 val)
{
struct lowpan_iphc_ctx *ctx = data;
if (val != 0 && val != 1)
return -EINVAL;
if (val)
set_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
else
clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
return 0;
}
static int lowpan_ctx_flag_active_get(void *data, u64 *val)
{
*val = lowpan_iphc_ctx_is_active(data);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_flag_active_fops,
lowpan_ctx_flag_active_get,
lowpan_ctx_flag_active_set, "%llu\n");
static int lowpan_ctx_flag_c_set(void *data, u64 val)
{
struct lowpan_iphc_ctx *ctx = data;
if (val != 0 && val != 1)
return -EINVAL;
if (val)
set_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
else
clear_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
return 0;
}
static int lowpan_ctx_flag_c_get(void *data, u64 *val)
{
*val = lowpan_iphc_ctx_is_compression(data);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_flag_c_fops, lowpan_ctx_flag_c_get,
lowpan_ctx_flag_c_set, "%llu\n");
static int lowpan_ctx_plen_set(void *data, u64 val)
{
struct lowpan_iphc_ctx *ctx = data;
struct lowpan_iphc_ctx_table *t =
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
if (val > 128)
return -EINVAL;
spin_lock_bh(&t->lock);
ctx->plen = val;
spin_unlock_bh(&t->lock);
return 0;
}
static int lowpan_ctx_plen_get(void *data, u64 *val)
{
struct lowpan_iphc_ctx *ctx = data;
struct lowpan_iphc_ctx_table *t =
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
spin_lock_bh(&t->lock);
*val = ctx->plen;
spin_unlock_bh(&t->lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_plen_fops, lowpan_ctx_plen_get,
lowpan_ctx_plen_set, "%llu\n");
static int lowpan_ctx_pfx_show(struct seq_file *file, void *offset)
{
struct lowpan_iphc_ctx *ctx = file->private;
struct lowpan_iphc_ctx_table *t =
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
spin_lock_bh(&t->lock);
seq_printf(file, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
be16_to_cpu(ctx->pfx.s6_addr16[0]),
be16_to_cpu(ctx->pfx.s6_addr16[1]),
be16_to_cpu(ctx->pfx.s6_addr16[2]),
be16_to_cpu(ctx->pfx.s6_addr16[3]),
be16_to_cpu(ctx->pfx.s6_addr16[4]),
be16_to_cpu(ctx->pfx.s6_addr16[5]),
be16_to_cpu(ctx->pfx.s6_addr16[6]),
be16_to_cpu(ctx->pfx.s6_addr16[7]));
spin_unlock_bh(&t->lock);
return 0;
}
static int lowpan_ctx_pfx_open(struct inode *inode, struct file *file)
{
return single_open(file, lowpan_ctx_pfx_show, inode->i_private);
}
static ssize_t lowpan_ctx_pfx_write(struct file *fp,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
char buf[128] = {};
struct seq_file *file = fp->private_data;
struct lowpan_iphc_ctx *ctx = file->private;
struct lowpan_iphc_ctx_table *t =
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
int status = count, n, i;
unsigned int addr[8];
if (copy_from_user(&buf, user_buf, min_t(size_t, sizeof(buf) - 1,
count))) {
status = -EFAULT;
goto out;
}
n = sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
&addr[0], &addr[1], &addr[2], &addr[3], &addr[4],
&addr[5], &addr[6], &addr[7]);
if (n != LOWPAN_DEBUGFS_CTX_PFX_NUM_ARGS) {
status = -EINVAL;
goto out;
}
spin_lock_bh(&t->lock);
for (i = 0; i < 8; i++)
ctx->pfx.s6_addr16[i] = cpu_to_be16(addr[i] & 0xffff);
spin_unlock_bh(&t->lock);
out:
return status;
}
static const struct file_operations lowpan_ctx_pfx_fops = {
.open = lowpan_ctx_pfx_open,
.read = seq_read,
.write = lowpan_ctx_pfx_write,
.llseek = seq_lseek,
.release = single_release,
};
static void lowpan_dev_debugfs_ctx_init(struct net_device *dev,
struct dentry *ctx, u8 id)
{
struct lowpan_dev *ldev = lowpan_dev(dev);
struct dentry *root;
char buf[32];
if (WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE))
return;
sprintf(buf, "%d", id);
root = debugfs_create_dir(buf, ctx);
debugfs_create_file("active", 0644, root, &ldev->ctx.table[id],
&lowpan_ctx_flag_active_fops);
debugfs_create_file("compression", 0644, root, &ldev->ctx.table[id],
&lowpan_ctx_flag_c_fops);
debugfs_create_file("prefix", 0644, root, &ldev->ctx.table[id],
&lowpan_ctx_pfx_fops);
debugfs_create_file("prefix_len", 0644, root, &ldev->ctx.table[id],
&lowpan_ctx_plen_fops);
}
static int lowpan_context_show(struct seq_file *file, void *offset)
{
struct lowpan_iphc_ctx_table *t = file->private;
int i;
seq_printf(file, "%3s|%-43s|%c\n", "cid", "prefix", 'C');
seq_puts(file, "-------------------------------------------------\n");
spin_lock_bh(&t->lock);
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
if (!lowpan_iphc_ctx_is_active(&t->table[i]))
continue;
seq_printf(file, "%3d|%39pI6c/%-3d|%d\n", t->table[i].id,
&t->table[i].pfx, t->table[i].plen,
lowpan_iphc_ctx_is_compression(&t->table[i]));
}
spin_unlock_bh(&t->lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(lowpan_context);
static int lowpan_short_addr_get(void *data, u64 *val)
{
struct wpan_dev *wdev = data;
rtnl_lock();
*val = le16_to_cpu(wdev->short_addr);
rtnl_unlock();
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(lowpan_short_addr_fops, lowpan_short_addr_get, NULL,
"0x%04llx\n");
static void lowpan_dev_debugfs_802154_init(const struct net_device *dev,
struct lowpan_dev *ldev)
{
struct dentry *root;
if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
return;
root = debugfs_create_dir("ieee802154", ldev->iface_debugfs);
debugfs_create_file("short_addr", 0444, root,
lowpan_802154_dev(dev)->wdev->ieee802154_ptr,
&lowpan_short_addr_fops);
}
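/* Builds the per-interface debugfs tree, e.g. (assuming debugfs is
 * mounted at /sys/kernel/debug and the interface is named "lowpan0",
 * an example name):
 *
 *	6lowpan/lowpan0/contexts/show
 *	6lowpan/lowpan0/contexts/<id>/{active,compression,prefix,prefix_len}
 *	6lowpan/lowpan0/ieee802154/short_addr	(802.15.4 links only)
 */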
void lowpan_dev_debugfs_init(struct net_device *dev)
{
struct lowpan_dev *ldev = lowpan_dev(dev);
struct dentry *contexts;
int i;
/* creating the root */
ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
contexts = debugfs_create_dir("contexts", ldev->iface_debugfs);
debugfs_create_file("show", 0644, contexts, &lowpan_dev(dev)->ctx,
&lowpan_context_fops);
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
lowpan_dev_debugfs_ctx_init(dev, contexts, i);
lowpan_dev_debugfs_802154_init(dev, ldev);
}
void lowpan_dev_debugfs_exit(struct net_device *dev)
{
debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs);
}
void __init lowpan_debugfs_init(void)
{
lowpan_debugfs = debugfs_create_dir("6lowpan", NULL);
}
void lowpan_debugfs_exit(void)
{
debugfs_remove_recursive(lowpan_debugfs);
}
| linux-master | net/6lowpan/debugfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN ICMPv6 compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_ICMPV6_ID_0 0xdf
#define LOWPAN_GHC_ICMPV6_MASK_0 0xff
LOWPAN_NHC(ghc_icmpv6, "RFC7400 ICMPv6", NEXTHDR_ICMP, 0,
LOWPAN_GHC_ICMPV6_ID_0, LOWPAN_GHC_ICMPV6_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_icmpv6);
MODULE_DESCRIPTION("6LoWPAN generic header ICMPv6 compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_icmpv6.c |
/*
* Copyright 2011, Siemens AG
* written by Alexander Smirnov <[email protected]>
*/
/* Based on patches from Jon Smirl <[email protected]>
* Copyright (c) 2011 Jon Smirl <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* Jon's code is based on 6lowpan implementation for Contiki which is:
* Copyright (c) 2008, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include "6lowpan_i.h"
#include "nhc.h"
/* Values of fields within the IPHC encoding first byte */
#define LOWPAN_IPHC_TF_MASK 0x18
#define LOWPAN_IPHC_TF_00 0x00
#define LOWPAN_IPHC_TF_01 0x08
#define LOWPAN_IPHC_TF_10 0x10
#define LOWPAN_IPHC_TF_11 0x18
#define LOWPAN_IPHC_NH 0x04
#define LOWPAN_IPHC_HLIM_MASK 0x03
#define LOWPAN_IPHC_HLIM_00 0x00
#define LOWPAN_IPHC_HLIM_01 0x01
#define LOWPAN_IPHC_HLIM_10 0x02
#define LOWPAN_IPHC_HLIM_11 0x03
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
#define LOWPAN_IPHC_SAC 0x40
#define LOWPAN_IPHC_SAM_MASK 0x30
#define LOWPAN_IPHC_SAM_00 0x00
#define LOWPAN_IPHC_SAM_01 0x10
#define LOWPAN_IPHC_SAM_10 0x20
#define LOWPAN_IPHC_SAM_11 0x30
#define LOWPAN_IPHC_M 0x08
#define LOWPAN_IPHC_DAC 0x04
#define LOWPAN_IPHC_DAM_MASK 0x03
#define LOWPAN_IPHC_DAM_00 0x00
#define LOWPAN_IPHC_DAM_01 0x01
#define LOWPAN_IPHC_DAM_10 0x02
#define LOWPAN_IPHC_DAM_11 0x03
/* ipv6 address based on mac
 * second bit-flip (Universe/Local) is done according to RFC 2464
*/
#define is_addr_mac_addr_based(a, m) \
((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
(((a)->s6_addr[9]) == (m)[1]) && \
(((a)->s6_addr[10]) == (m)[2]) && \
(((a)->s6_addr[11]) == (m)[3]) && \
(((a)->s6_addr[12]) == (m)[4]) && \
(((a)->s6_addr[13]) == (m)[5]) && \
(((a)->s6_addr[14]) == (m)[6]) && \
(((a)->s6_addr[15]) == (m)[7]))
/* check whether we can compress the IID to 16 bits,
 * which is possible only for unicast addresses whose interface ID has
 * the form 0000:00ff:fe00:XXXX.
*/
#define lowpan_is_iid_16_bit_compressable(a) \
((((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr[10]) == 0) && \
(((a)->s6_addr[11]) == 0xff) && \
(((a)->s6_addr[12]) == 0xfe) && \
(((a)->s6_addr[13]) == 0))
/* check whether the 112-bit gid of the multicast address is mappable to: */
/* 48 bits, FFXX::00XX:XXXX:XXXX */
#define lowpan_is_mcast_addr_compressable48(a) \
((((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr[10]) == 0))
/* 32 bits, FFXX::00XX:XXXX */
#define lowpan_is_mcast_addr_compressable32(a) \
((((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr16[5]) == 0) && \
(((a)->s6_addr[12]) == 0))
/* 8 bits, FF02::00XX */
#define lowpan_is_mcast_addr_compressable8(a) \
((((a)->s6_addr[1]) == 2) && \
(((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr16[5]) == 0) && \
(((a)->s6_addr16[6]) == 0) && \
(((a)->s6_addr[14]) == 0))
#define lowpan_is_linklocal_zero_padded(a) \
(!(hdr->saddr.s6_addr[1] & 0x3f) && \
!hdr->saddr.s6_addr16[1] && \
!hdr->saddr.s6_addr32[1])
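/* CID extension byte (RFC 6282): the source context identifier sits in
 * the upper nibble and the destination context identifier in the lower
 * nibble, as extracted by the two macros below.
 */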
#define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f)
#define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4)
static inline void
lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
const void *lladdr)
{
const struct ieee802154_addr *addr = lladdr;
u8 eui64[EUI64_ADDR_LEN];
switch (addr->mode) {
case IEEE802154_ADDR_LONG:
ieee802154_le64_to_be64(eui64, &addr->extended_addr);
lowpan_iphc_uncompress_eui64_lladdr(ipaddr, eui64);
break;
case IEEE802154_ADDR_SHORT:
/* fe:80::ff:fe00:XXXX
* \__/
* short_addr
*
* Universe/Local bit is zero.
*/
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
ipaddr->s6_addr[11] = 0xFF;
ipaddr->s6_addr[12] = 0xFE;
ieee802154_le16_to_be16(&ipaddr->s6_addr16[7],
&addr->short_addr);
break;
default:
		/* should never be reached; such addresses are filtered by 802154 6lowpan */
WARN_ON_ONCE(1);
break;
}
}
static struct lowpan_iphc_ctx *
lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id)
{
struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];
if (!lowpan_iphc_ctx_is_active(ret))
return NULL;
return ret;
}
static struct lowpan_iphc_ctx *
lowpan_iphc_ctx_get_by_addr(const struct net_device *dev,
const struct in6_addr *addr)
{
struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
struct lowpan_iphc_ctx *ret = NULL;
struct in6_addr addr_pfx;
u8 addr_plen;
int i;
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
/* Check if context is valid. A context that is not valid
* MUST NOT be used for compression.
*/
if (!lowpan_iphc_ctx_is_active(&table[i]) ||
!lowpan_iphc_ctx_is_compression(&table[i]))
continue;
ipv6_addr_prefix(&addr_pfx, addr, table[i].plen);
		/* if the prefix len is < 64, the remaining bits up to the
		 * 64th bit are zero. Otherwise we use table[i].plen.
*/
if (table[i].plen < 64)
addr_plen = 64;
else
addr_plen = table[i].plen;
if (ipv6_prefix_equal(&addr_pfx, &table[i].pfx, addr_plen)) {
/* remember first match */
if (!ret) {
ret = &table[i];
continue;
}
/* get the context with longest prefix len */
if (table[i].plen > ret->plen)
ret = &table[i];
}
}
return ret;
}
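/* Context based multicast addresses are unicast-prefix-based multicast
 * addresses (RFC 3306): byte 3 carries the prefix length and bytes 4-11
 * carry the network prefix, which is how the candidate address is
 * assembled below before comparing it with @addr.
 */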
static struct lowpan_iphc_ctx *
lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
const struct in6_addr *addr)
{
struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
struct lowpan_iphc_ctx *ret = NULL;
struct in6_addr addr_mcast, network_pfx = {};
int i;
	/* initialize the candidate mcast address from @addr */
memcpy(&addr_mcast, addr, sizeof(*addr));
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
/* Check if context is valid. A context that is not valid
* MUST NOT be used for compression.
*/
if (!lowpan_iphc_ctx_is_active(&table[i]) ||
!lowpan_iphc_ctx_is_compression(&table[i]))
continue;
/* setting plen */
addr_mcast.s6_addr[3] = table[i].plen;
/* get network prefix to copy into multicast address */
ipv6_addr_prefix(&network_pfx, &table[i].pfx,
table[i].plen);
/* setting network prefix */
memcpy(&addr_mcast.s6_addr[4], &network_pfx, 8);
if (ipv6_addr_equal(addr, &addr_mcast)) {
ret = &table[i];
break;
}
}
return ret;
}
static void lowpan_iphc_uncompress_lladdr(const struct net_device *dev,
struct in6_addr *ipaddr,
const void *lladdr)
{
switch (dev->addr_len) {
case ETH_ALEN:
lowpan_iphc_uncompress_eui48_lladdr(ipaddr, lladdr);
break;
case EUI64_ADDR_LEN:
lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
break;
default:
WARN_ON_ONCE(1);
break;
}
}
/* Uncompress address function for source and
 * destination address (non-multicast).
 *
 * address_mode is the masked SAM or DAM value.
*/
static int lowpan_iphc_uncompress_addr(struct sk_buff *skb,
const struct net_device *dev,
struct in6_addr *ipaddr,
u8 address_mode, const void *lladdr)
{
bool fail;
switch (address_mode) {
/* SAM and DAM are the same here */
case LOWPAN_IPHC_DAM_00:
/* for global link addresses */
fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
break;
case LOWPAN_IPHC_SAM_01:
case LOWPAN_IPHC_DAM_01:
/* fe:80::XXXX:XXXX:XXXX:XXXX */
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
break;
case LOWPAN_IPHC_SAM_10:
case LOWPAN_IPHC_DAM_10:
/* fe:80::ff:fe00:XXXX */
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
ipaddr->s6_addr[11] = 0xFF;
ipaddr->s6_addr[12] = 0xFE;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
break;
case LOWPAN_IPHC_SAM_11:
case LOWPAN_IPHC_DAM_11:
fail = false;
switch (lowpan_dev(dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
break;
default:
lowpan_iphc_uncompress_lladdr(dev, ipaddr, lladdr);
break;
}
break;
default:
pr_debug("Invalid address mode value: 0x%x\n", address_mode);
return -EINVAL;
}
if (fail) {
pr_debug("Failed to fetch skb data\n");
return -EIO;
}
raw_dump_inline(NULL, "Reconstructed ipv6 addr is",
ipaddr->s6_addr, 16);
return 0;
}
/* Uncompress address function for source context
 * based address (non-multicast).
*/
static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb,
const struct net_device *dev,
const struct lowpan_iphc_ctx *ctx,
struct in6_addr *ipaddr,
u8 address_mode, const void *lladdr)
{
bool fail;
switch (address_mode) {
/* SAM and DAM are the same here */
case LOWPAN_IPHC_DAM_00:
fail = false;
/* SAM_00 -> unspec address ::
* Do nothing, address is already ::
*
		 * DAM 00 -> reserved, should never occur.
*/
break;
case LOWPAN_IPHC_SAM_01:
case LOWPAN_IPHC_DAM_01:
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
break;
case LOWPAN_IPHC_SAM_10:
case LOWPAN_IPHC_DAM_10:
ipaddr->s6_addr[11] = 0xFF;
ipaddr->s6_addr[12] = 0xFE;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
break;
case LOWPAN_IPHC_SAM_11:
case LOWPAN_IPHC_DAM_11:
fail = false;
switch (lowpan_dev(dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
break;
default:
lowpan_iphc_uncompress_lladdr(dev, ipaddr, lladdr);
break;
}
ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
break;
default:
pr_debug("Invalid sam value: 0x%x\n", address_mode);
return -EINVAL;
}
if (fail) {
pr_debug("Failed to fetch skb data\n");
return -EIO;
}
raw_dump_inline(NULL,
"Reconstructed context based ipv6 src addr is",
ipaddr->s6_addr, 16);
return 0;
}
/* Uncompress function for multicast destination address,
* when M bit is set.
*/
static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
struct in6_addr *ipaddr,
u8 address_mode)
{
bool fail;
switch (address_mode) {
case LOWPAN_IPHC_DAM_00:
/* 00: 128 bits. The full address
* is carried in-line.
*/
fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
break;
case LOWPAN_IPHC_DAM_01:
/* 01: 48 bits. The address takes
* the form ffXX::00XX:XXXX:XXXX.
*/
ipaddr->s6_addr[0] = 0xFF;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
break;
case LOWPAN_IPHC_DAM_10:
/* 10: 32 bits. The address takes
* the form ffXX::00XX:XXXX.
*/
ipaddr->s6_addr[0] = 0xFF;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
break;
case LOWPAN_IPHC_DAM_11:
/* 11: 8 bits. The address takes
* the form ff02::00XX.
*/
ipaddr->s6_addr[0] = 0xFF;
ipaddr->s6_addr[1] = 0x02;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
break;
default:
pr_debug("DAM value has a wrong value: 0x%x\n", address_mode);
return -EINVAL;
}
if (fail) {
pr_debug("Failed to fetch skb data\n");
return -EIO;
}
raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is",
ipaddr->s6_addr, 16);
return 0;
}
static int lowpan_uncompress_multicast_ctx_daddr(struct sk_buff *skb,
struct lowpan_iphc_ctx *ctx,
struct in6_addr *ipaddr,
u8 address_mode)
{
struct in6_addr network_pfx = {};
bool fail;
ipaddr->s6_addr[0] = 0xFF;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 2);
fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[12], 4);
if (fail)
return -EIO;
/* take prefix_len and network prefix from the context */
ipaddr->s6_addr[3] = ctx->plen;
/* get network prefix to copy into multicast address */
ipv6_addr_prefix(&network_pfx, &ctx->pfx, ctx->plen);
/* setting network prefix */
memcpy(&ipaddr->s6_addr[4], &network_pfx, 8);
return 0;
}
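/* In struct ipv6hdr the 8-bit Traffic Class (6-bit DSCP followed by
 * 2-bit ECN) is split across hdr->priority (the upper four DSCP bits)
 * and the upper nibble of hdr->flow_lbl[0] (the two low DSCP bits plus
 * ECN); the helpers below rebuild those pieces from the inline IPHC TF
 * bytes, where ECN occupies the two most significant bits.
 */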
/* get the ecn values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_ecn(struct ipv6hdr *hdr, const u8 *tf)
{
/* get the two higher bits which is ecn */
u8 ecn = tf[0] & 0xc0;
/* ECN takes 0x30 in hdr->flow_lbl[0] */
hdr->flow_lbl[0] |= (ecn >> 2);
}
/* get the dscp values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_dscp(struct ipv6hdr *hdr, const u8 *tf)
{
/* DSCP is at place after ECN */
u8 dscp = tf[0] & 0x3f;
/* The four highest bits need to be set at hdr->priority */
hdr->priority |= ((dscp & 0x3c) >> 2);
	/* The two lower bits are part of hdr->flow_lbl[0] */
hdr->flow_lbl[0] |= ((dscp & 0x03) << 6);
}
/* get the flow label values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_lbl(struct ipv6hdr *hdr, const u8 *lbl)
{
	/* The flow label always starts in the lower nibble of flow_lbl[0]
	 * and is followed by two more bytes. Inside the inline data the
	 * position of the flow label can differ (e.g. in case "01" vs "00"
	 * the traffic class is shifted by 8 bits), which is handled by the
	 * lbl pointer passed in by the caller.
	 *
	 * The flow label starts at the lower nibble of flow_lbl[0]; the
	 * higher nibble is part of DSCP + ECN.
	 */
hdr->flow_lbl[0] |= lbl[0] & 0x0f;
memcpy(&hdr->flow_lbl[1], &lbl[1], 2);
}
/* lowpan_iphc_tf_decompress - decompress the traffic class.
* This function will return zero on success, a value lower than zero if
* failed.
*/
static int lowpan_iphc_tf_decompress(struct sk_buff *skb, struct ipv6hdr *hdr,
u8 val)
{
u8 tf[4];
/* Traffic Class and Flow Label */
switch (val) {
case LOWPAN_IPHC_TF_00:
/* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */
if (lowpan_fetch_skb(skb, tf, 4))
return -EINVAL;
/* 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |ECN| DSCP | rsv | Flow Label |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_dscp(hdr, tf);
lowpan_iphc_tf_set_lbl(hdr, &tf[1]);
break;
case LOWPAN_IPHC_TF_01:
/* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided. */
if (lowpan_fetch_skb(skb, tf, 3))
return -EINVAL;
/* 1 2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |ECN|rsv| Flow Label |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_lbl(hdr, &tf[0]);
break;
case LOWPAN_IPHC_TF_10:
/* ECN + DSCP (1 byte), Flow Label is elided. */
if (lowpan_fetch_skb(skb, tf, 1))
return -EINVAL;
/* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* |ECN| DSCP |
* +-+-+-+-+-+-+-+-+
*/
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_dscp(hdr, tf);
break;
case LOWPAN_IPHC_TF_11:
/* Traffic Class and Flow Label are elided */
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return 0;
}
/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = {
[LOWPAN_IPHC_HLIM_01] = 1,
[LOWPAN_IPHC_HLIM_10] = 64,
[LOWPAN_IPHC_HLIM_11] = 255,
};
int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
struct ipv6hdr hdr = {};
struct lowpan_iphc_ctx *ci;
u8 iphc0, iphc1, cid = 0;
int err;
raw_dump_table(__func__, "raw skb data dump uncompressed",
skb->data, skb->len);
if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
return -EINVAL;
hdr.version = 6;
/* default CID = 0, another if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
if (lowpan_fetch_skb(skb, &cid, sizeof(cid)))
return -EINVAL;
}
err = lowpan_iphc_tf_decompress(skb, &hdr,
iphc0 & LOWPAN_IPHC_TF_MASK);
if (err < 0)
return err;
/* Next Header */
if (!(iphc0 & LOWPAN_IPHC_NH)) {
/* Next header is carried inline */
if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
return -EINVAL;
pr_debug("NH flag is set, next header carried inline: %02x\n",
hdr.nexthdr);
}
/* Hop Limit */
if ((iphc0 & LOWPAN_IPHC_HLIM_MASK) != LOWPAN_IPHC_HLIM_00) {
hdr.hop_limit = lowpan_ttl_values[iphc0 & LOWPAN_IPHC_HLIM_MASK];
} else {
if (lowpan_fetch_skb(skb, &hdr.hop_limit,
sizeof(hdr.hop_limit)))
return -EINVAL;
}
if (iphc1 & LOWPAN_IPHC_SAC) {
spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid));
if (!ci) {
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
return -EINVAL;
}
pr_debug("SAC bit is set. Handle context based source address.\n");
err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
iphc1 & LOWPAN_IPHC_SAM_MASK,
saddr);
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
} else {
/* Source address uncompression */
pr_debug("source address stateless compression\n");
err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.saddr,
iphc1 & LOWPAN_IPHC_SAM_MASK,
saddr);
}
/* Check on error of previous branch */
if (err)
return -EINVAL;
switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) {
case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC:
skb->pkt_type = PACKET_BROADCAST;
spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
if (!ci) {
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
return -EINVAL;
}
/* multicast with context */
pr_debug("dest: context-based mcast compression\n");
err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
&hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK);
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
break;
case LOWPAN_IPHC_M:
skb->pkt_type = PACKET_BROADCAST;
/* multicast */
err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK);
break;
case LOWPAN_IPHC_DAC:
skb->pkt_type = PACKET_HOST;
spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
if (!ci) {
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
return -EINVAL;
}
/* Destination address context based uncompression */
pr_debug("DAC bit is set. Handle context based destination address.\n");
err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK,
daddr);
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
break;
default:
skb->pkt_type = PACKET_HOST;
err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK,
daddr);
pr_debug("dest: stateless compression mode %d dest %pI6c\n",
iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
break;
}
if (err)
return -EINVAL;
/* Next header data uncompression */
if (iphc0 & LOWPAN_IPHC_NH) {
err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
if (err < 0)
return err;
} else {
err = skb_cow(skb, sizeof(hdr));
if (unlikely(err))
return err;
}
switch (lowpan_dev(dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
if (lowpan_802154_cb(skb)->d_size)
hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
sizeof(struct ipv6hdr));
else
hdr.payload_len = htons(skb->len);
break;
default:
hdr.payload_len = htons(skb->len);
break;
}
pr_debug("skb headroom size = %d, data length = %d\n",
skb_headroom(skb), skb->len);
pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
"nexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr));
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
return 0;
}
EXPORT_SYMBOL_GPL(lowpan_header_decompress);
static const u8 lowpan_iphc_dam_to_sam_value[] = {
[LOWPAN_IPHC_DAM_00] = LOWPAN_IPHC_SAM_00,
[LOWPAN_IPHC_DAM_01] = LOWPAN_IPHC_SAM_01,
[LOWPAN_IPHC_DAM_10] = LOWPAN_IPHC_SAM_10,
[LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
};
static inline bool
lowpan_iphc_compress_ctx_802154_lladdr(const struct in6_addr *ipaddr,
const struct lowpan_iphc_ctx *ctx,
const void *lladdr)
{
const struct ieee802154_addr *addr = lladdr;
unsigned char extended_addr[EUI64_ADDR_LEN];
bool lladdr_compress = false;
struct in6_addr tmp = {};
switch (addr->mode) {
case IEEE802154_ADDR_LONG:
ieee802154_le64_to_be64(&extended_addr, &addr->extended_addr);
/* check for SAM/DAM = 11 */
memcpy(&tmp.s6_addr[8], &extended_addr, EUI64_ADDR_LEN);
/* second bit-flip (Universal/Local) is done according to RFC2464 */
tmp.s6_addr[8] ^= 0x02;
/* context information is always used */
ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
if (ipv6_addr_equal(&tmp, ipaddr))
lladdr_compress = true;
break;
case IEEE802154_ADDR_SHORT:
tmp.s6_addr[11] = 0xFF;
tmp.s6_addr[12] = 0xFE;
ieee802154_le16_to_be16(&tmp.s6_addr16[7],
&addr->short_addr);
/* context information is always used */
ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
if (ipv6_addr_equal(&tmp, ipaddr))
lladdr_compress = true;
break;
default:
/* should never be reached; such addresses are filtered by 802154 6lowpan */
WARN_ON_ONCE(1);
break;
}
return lladdr_compress;
}
static bool lowpan_iphc_addr_equal(const struct net_device *dev,
const struct lowpan_iphc_ctx *ctx,
const struct in6_addr *ipaddr,
const void *lladdr)
{
struct in6_addr tmp = {};
lowpan_iphc_uncompress_lladdr(dev, &tmp, lladdr);
if (ctx)
ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
return ipv6_addr_equal(&tmp, ipaddr);
}
static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct net_device *dev,
const struct in6_addr *ipaddr,
const struct lowpan_iphc_ctx *ctx,
const unsigned char *lladdr, bool sam)
{
struct in6_addr tmp;
u8 dam;
switch (lowpan_dev(dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
if (lowpan_iphc_compress_ctx_802154_lladdr(ipaddr, ctx,
lladdr)) {
dam = LOWPAN_IPHC_DAM_11;
goto out;
}
break;
default:
if (lowpan_iphc_addr_equal(dev, ctx, ipaddr, lladdr)) {
dam = LOWPAN_IPHC_DAM_11;
goto out;
}
break;
}
memset(&tmp, 0, sizeof(tmp));
/* check for SAM/DAM = 10 */
tmp.s6_addr[11] = 0xFF;
tmp.s6_addr[12] = 0xFE;
memcpy(&tmp.s6_addr[14], &ipaddr->s6_addr[14], 2);
/* context information is always used */
ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
if (ipv6_addr_equal(&tmp, ipaddr)) {
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[14], 2);
dam = LOWPAN_IPHC_DAM_10;
goto out;
}
memset(&tmp, 0, sizeof(tmp));
/* check for SAM/DAM = 01, should always match */
memcpy(&tmp.s6_addr[8], &ipaddr->s6_addr[8], 8);
/* context information is always used */
ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
if (ipv6_addr_equal(&tmp, ipaddr)) {
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[8], 8);
dam = LOWPAN_IPHC_DAM_01;
goto out;
}
WARN_ONCE(1, "context found but no address mode matched\n");
return LOWPAN_IPHC_DAM_00;
out:
if (sam)
return lowpan_iphc_dam_to_sam_value[dam];
else
return dam;
}
static inline bool
lowpan_iphc_compress_802154_lladdr(const struct in6_addr *ipaddr,
const void *lladdr)
{
const struct ieee802154_addr *addr = lladdr;
unsigned char extended_addr[EUI64_ADDR_LEN];
bool lladdr_compress = false;
struct in6_addr tmp = {};
switch (addr->mode) {
case IEEE802154_ADDR_LONG:
ieee802154_le64_to_be64(&extended_addr, &addr->extended_addr);
if (is_addr_mac_addr_based(ipaddr, extended_addr))
lladdr_compress = true;
break;
case IEEE802154_ADDR_SHORT:
/* fe:80::ff:fe00:XXXX
* \__/
* short_addr
*
* Universal/Local bit is zero.
*/
tmp.s6_addr[0] = 0xFE;
tmp.s6_addr[1] = 0x80;
tmp.s6_addr[11] = 0xFF;
tmp.s6_addr[12] = 0xFE;
ieee802154_le16_to_be16(&tmp.s6_addr16[7],
&addr->short_addr);
if (ipv6_addr_equal(&tmp, ipaddr))
lladdr_compress = true;
break;
default:
/* should never be reached; such addresses are filtered by 802154 6lowpan */
WARN_ON_ONCE(1);
break;
}
return lladdr_compress;
}
static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct net_device *dev,
const struct in6_addr *ipaddr,
const unsigned char *lladdr, bool sam)
{
u8 dam = LOWPAN_IPHC_DAM_01;
switch (lowpan_dev(dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
if (lowpan_iphc_compress_802154_lladdr(ipaddr, lladdr)) {
dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
pr_debug("address compression 0 bits\n");
goto out;
}
break;
default:
if (lowpan_iphc_addr_equal(dev, NULL, ipaddr, lladdr)) {
dam = LOWPAN_IPHC_DAM_11;
pr_debug("address compression 0 bits\n");
goto out;
}
break;
}
if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
/* compress IID to 16 bits xxxx::XXXX */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
dam = LOWPAN_IPHC_DAM_10; /* 16-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
*hc_ptr - 2, 2);
goto out;
}
/* do not compress IID => xxxx::IID */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
*hc_ptr - 8, 8);
out:
if (sam)
return lowpan_iphc_dam_to_sam_value[dam];
else
return dam;
}
/* lowpan_iphc_get_tc - get the ECN + DSCP fields in hc format */
static inline u8 lowpan_iphc_get_tc(const struct ipv6hdr *hdr)
{
u8 dscp, ecn;
/* hdr->priority contains the higher bits of dscp, the lower bits are part
 * of flow_lbl[0]. Note that ECN and DSCP are swapped in the ipv6 hdr.
 */
dscp = (hdr->priority << 2) | ((hdr->flow_lbl[0] & 0xc0) >> 6);
/* ECN is at the two lower bits from first nibble of flow_lbl[0] */
ecn = (hdr->flow_lbl[0] & 0x30);
/* for pretty debug output, also shift ecn to get the ecn value */
pr_debug("ecn 0x%02x dscp 0x%02x\n", ecn >> 4, dscp);
/* ECN is at 0x30 now, shift it to form ECN + DSCP */
return (ecn << 2) | dscp;
}
/* lowpan_iphc_is_flow_lbl_zero - check if flow label is zero */
static inline bool lowpan_iphc_is_flow_lbl_zero(const struct ipv6hdr *hdr)
{
return ((!(hdr->flow_lbl[0] & 0x0f)) &&
!hdr->flow_lbl[1] && !hdr->flow_lbl[2]);
}
/* lowpan_iphc_tf_compress - compress the traffic class which is set by
* ipv6hdr. Return the corresponding format identifier which is used.
*/
static u8 lowpan_iphc_tf_compress(u8 **hc_ptr, const struct ipv6hdr *hdr)
{
/* get the ecn and dscp data in one byte, laid out as ECN(hi) + DSCP(lo) */
u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;
/* print out the traffic class in hc format */
pr_debug("tc 0x%02x\n", tc);
if (lowpan_iphc_is_flow_lbl_zero(hdr)) {
if (!tc) {
/* 11: Traffic Class and Flow Label are elided. */
val = LOWPAN_IPHC_TF_11;
} else {
/* 10: ECN + DSCP (1 byte), Flow Label is elided.
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* |ECN| DSCP |
* +-+-+-+-+-+-+-+-+
*/
lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));
val = LOWPAN_IPHC_TF_10;
}
} else {
/* check if dscp is zero; it occupies the bits after the first two (ECN) bits */
if (!(tc & 0x3f)) {
/* 01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*
* 1 2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |ECN|rsv| Flow Label |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
memcpy(&tf[0], &hdr->flow_lbl[0], 3);
/* zero the highest 4 bits, which contain DSCP + ECN */
tf[0] &= ~0xf0;
/* set ECN */
tf[0] |= (tc & 0xc0);
lowpan_push_hc_data(hc_ptr, tf, 3);
val = LOWPAN_IPHC_TF_01;
} else {
/* 00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
*
* 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |ECN| DSCP | rsv | Flow Label |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
memcpy(&tf[0], &tc, sizeof(tc));
/* highest nibble of flow_lbl[0] is part of DSCP + ECN
* which will be the 4-bit pad and will be filled with
* zeros afterwards.
*/
memcpy(&tf[1], &hdr->flow_lbl[0], 3);
/* zero the 4-bit pad, which is reserved */
tf[1] &= ~0xf0;
lowpan_push_hc_data(hc_ptr, tf, 4);
val = LOWPAN_IPHC_TF_00;
}
}
return val;
}
static u8 lowpan_iphc_mcast_ctx_addr_compress(u8 **hc_ptr,
const struct lowpan_iphc_ctx *ctx,
const struct in6_addr *ipaddr)
{
u8 data[6];
/* flags/scope, reserved (RIID) */
memcpy(data, &ipaddr->s6_addr[1], 2);
/* group ID */
memcpy(&data[1], &ipaddr->s6_addr[11], 4);
lowpan_push_hc_data(hc_ptr, data, 6);
return LOWPAN_IPHC_DAM_00;
}
static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
const struct in6_addr *ipaddr)
{
u8 val;
if (lowpan_is_mcast_addr_compressable8(ipaddr)) {
pr_debug("compressed to 1 octet\n");
/* use last byte */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[15], 1);
val = LOWPAN_IPHC_DAM_11;
} else if (lowpan_is_mcast_addr_compressable32(ipaddr)) {
pr_debug("compressed to 4 octets\n");
/* second byte + the last three */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[13], 3);
val = LOWPAN_IPHC_DAM_10;
} else if (lowpan_is_mcast_addr_compressable48(ipaddr)) {
pr_debug("compressed to 6 octets\n");
/* second byte + the last five */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[11], 5);
val = LOWPAN_IPHC_DAM_01;
} else {
pr_debug("using full address\n");
lowpan_push_hc_data(hc_ptr, ipaddr->s6_addr, 16);
val = LOWPAN_IPHC_DAM_00;
}
return val;
}
int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
u8 iphc0, iphc1, *hc_ptr, cid = 0;
struct ipv6hdr *hdr;
u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {};
struct lowpan_iphc_ctx *dci, *sci, dci_entry, sci_entry;
int ret, ipv6_daddr_type, ipv6_saddr_type;
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;
hdr = ipv6_hdr(skb);
hc_ptr = head + 2;
pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
"\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
hdr->hop_limit, &hdr->daddr);
raw_dump_table(__func__, "raw skb network header dump",
skb_network_header(skb), sizeof(struct ipv6hdr));
/* As we copy some bit-length fields into the IPHC encoding bytes,
 * we sometimes use |=.
 * If the field is 0 and the current bit value in memory is 1,
 * this does not work. We therefore reset the IPHC encoding here.
 */
iphc0 = LOWPAN_DISPATCH_IPHC;
iphc1 = 0;
raw_dump_table(__func__, "sending raw skb network uncompressed packet",
skb->data, skb->len);
ipv6_daddr_type = ipv6_addr_type(&hdr->daddr);
spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
if (ipv6_daddr_type & IPV6_ADDR_MULTICAST)
dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr);
else
dci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->daddr);
if (dci) {
memcpy(&dci_entry, dci, sizeof(*dci));
cid |= dci->id;
}
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr);
if (sci) {
memcpy(&sci_entry, sci, sizeof(*sci));
cid |= (sci->id << 4);
}
spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
/* if cid is zero it will be compressed */
if (cid) {
iphc1 |= LOWPAN_IPHC_CID;
lowpan_push_hc_data(&hc_ptr, &cid, sizeof(cid));
}
/* Traffic Class, Flow Label compression */
iphc0 |= lowpan_iphc_tf_compress(&hc_ptr, hdr);
/* NOTE: payload length is always compressed */
/* Check if we provide the nhc format for nexthdr and compression
 * functionality. If not, the nexthdr is carried inline and is not compressed.
 */
ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr);
if (ret == -ENOENT)
lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
sizeof(hdr->nexthdr));
else
iphc0 |= LOWPAN_IPHC_NH;
/* Hop limit
* if 1: compress, encoding is 01
* if 64: compress, encoding is 10
* if 255: compress, encoding is 11
* else do not compress
*/
switch (hdr->hop_limit) {
case 1:
iphc0 |= LOWPAN_IPHC_HLIM_01;
break;
case 64:
iphc0 |= LOWPAN_IPHC_HLIM_10;
break;
case 255:
iphc0 |= LOWPAN_IPHC_HLIM_11;
break;
default:
lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
sizeof(hdr->hop_limit));
}
ipv6_saddr_type = ipv6_addr_type(&hdr->saddr);
/* source address compression */
if (ipv6_saddr_type == IPV6_ADDR_ANY) {
pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
} else {
if (sci) {
iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, dev,
&hdr->saddr,
&sci_entry, saddr,
true);
iphc1 |= LOWPAN_IPHC_SAC;
} else {
if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL &&
lowpan_is_linklocal_zero_padded(hdr->saddr)) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr, dev,
&hdr->saddr,
saddr, true);
pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
&hdr->saddr, iphc1);
} else {
pr_debug("send the full source address\n");
lowpan_push_hc_data(&hc_ptr,
hdr->saddr.s6_addr, 16);
}
}
}
/* destination address compression */
if (ipv6_daddr_type & IPV6_ADDR_MULTICAST) {
pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (dci) {
iphc1 |= lowpan_iphc_mcast_ctx_addr_compress(&hc_ptr,
&dci_entry,
&hdr->daddr);
iphc1 |= LOWPAN_IPHC_DAC;
} else {
iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr,
&hdr->daddr);
}
} else {
if (dci) {
iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, dev,
&hdr->daddr,
&dci_entry, daddr,
false);
iphc1 |= LOWPAN_IPHC_DAC;
} else {
if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL &&
lowpan_is_linklocal_zero_padded(hdr->daddr)) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr, dev,
&hdr->daddr,
daddr, false);
pr_debug("dest address unicast link-local %pI6c iphc1 0x%02x\n",
&hdr->daddr, iphc1);
} else {
pr_debug("dest address unicast %pI6c\n",
&hdr->daddr);
lowpan_push_hc_data(&hc_ptr,
hdr->daddr.s6_addr, 16);
}
}
}
/* next header compression */
if (iphc0 & LOWPAN_IPHC_NH) {
ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
if (ret < 0)
return ret;
}
head[0] = iphc0;
head[1] = iphc1;
skb_pull(skb, sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head);
skb_reset_network_header(skb);
pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len);
raw_dump_table(__func__, "raw skb data dump compressed",
skb->data, skb->len);
return 0;
}
EXPORT_SYMBOL_GPL(lowpan_header_compress);
| linux-master | net/6lowpan/iphc.c |
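The traffic class handling in iphc.c above hinges on one bit-level detail: the IPv6 header stores the 8-bit traffic class split across hdr->priority and the top nibble of flow_lbl[0], while IPHC wants a single byte laid out as ECN (high 2 bits) + DSCP (low 6 bits). Below is a minimal user-space sketch of that byte construction, mirroring the arithmetic of lowpan_iphc_get_tc(); the struct and sample values are illustrative stand-ins, not the kernel's struct ipv6hdr.
#include <stdint.h>
#include <stdio.h>
/* Illustrative stand-in for the two ipv6hdr fields involved. */
struct tc_fields {
        uint8_t priority;   /* high 4 bits of the 8-bit traffic class */
        uint8_t flow_lbl0;  /* low 4 bits of TC + top of the flow label */
};
/* Rebuild the IPHC TC byte: ECN in bits 7-6, DSCP in bits 5-0,
 * mirroring lowpan_iphc_get_tc() above.
 */
static uint8_t iphc_get_tc(const struct tc_fields *h)
{
        uint8_t dscp, ecn;
        dscp = (uint8_t)((h->priority << 2) | ((h->flow_lbl0 & 0xc0) >> 6));
        ecn = h->flow_lbl0 & 0x30;      /* still at bit positions 5-4 */
        return (uint8_t)((ecn << 2) | dscp);
}
int main(void)
{
        /* DSCP 0x2e (EF) and ECN 0b01, spread across the two fields */
        struct tc_fields h = { .priority = 0x0b, .flow_lbl0 = 0x90 };
        printf("IPHC TC byte: 0x%02x\n", iphc_get_tc(&h)); /* prints 0x6e */
        return 0;
}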
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* (C) 2016 Pengutronix, Alexander Aring <[email protected]>
*/
#include <net/6lowpan.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include "6lowpan_i.h"
static int lowpan_ndisc_is_useropt(u8 nd_opt_type)
{
return nd_opt_type == ND_OPT_6CO;
}
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
#define NDISC_802154_SHORT_ADDR_LENGTH 1
static int lowpan_ndisc_parse_802154_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
{
switch (nd_opt->nd_opt_len) {
case NDISC_802154_SHORT_ADDR_LENGTH:
if (ndopts->nd_802154_opt_array[nd_opt->nd_opt_type])
ND_PRINTK(2, warn,
"%s: duplicated short addr ND6 option found: type=%d\n",
__func__, nd_opt->nd_opt_type);
else
ndopts->nd_802154_opt_array[nd_opt->nd_opt_type] = nd_opt;
return 1;
default:
/* all others will be handled by ndisc IPv6 option parsing */
return 0;
}
}
static int lowpan_ndisc_parse_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
{
if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
return 0;
switch (nd_opt->nd_opt_type) {
case ND_OPT_SOURCE_LL_ADDR:
case ND_OPT_TARGET_LL_ADDR:
return lowpan_ndisc_parse_802154_options(dev, nd_opt, ndopts);
default:
return 0;
}
}
static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
u8 icmp6_type,
const struct ndisc_options *ndopts)
{
struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));
u8 *lladdr_short = NULL;
switch (icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
case NDISC_NEIGHBOUR_SOLICITATION:
if (ndopts->nd_802154_opts_src_lladdr) {
lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_src_lladdr,
IEEE802154_SHORT_ADDR_LEN, 0);
if (!lladdr_short) {
ND_PRINTK(2, warn,
"NA: invalid short link-layer address length\n");
return;
}
}
break;
case NDISC_REDIRECT:
case NDISC_NEIGHBOUR_ADVERTISEMENT:
if (ndopts->nd_802154_opts_tgt_lladdr) {
lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_tgt_lladdr,
IEEE802154_SHORT_ADDR_LEN, 0);
if (!lladdr_short) {
ND_PRINTK(2, warn,
"NA: invalid short link-layer address length\n");
return;
}
}
break;
default:
break;
}
write_lock_bh(&n->lock);
if (lladdr_short) {
ieee802154_be16_to_le16(&neigh->short_addr, lladdr_short);
if (!lowpan_802154_is_valid_src_short_addr(neigh->short_addr))
neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
}
write_unlock_bh(&n->lock);
}
static void lowpan_ndisc_update(const struct net_device *dev,
struct neighbour *n, u32 flags, u8 icmp6_type,
const struct ndisc_options *ndopts)
{
if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
return;
/* react to overrides only. TODO: check if this is really right. */
if (flags & NEIGH_UPDATE_F_OVERRIDE)
lowpan_ndisc_802154_update(n, flags, icmp6_type, ndopts);
}
static int lowpan_ndisc_opt_addr_space(const struct net_device *dev,
u8 icmp6_type, struct neighbour *neigh,
u8 *ha_buf, u8 **ha)
{
struct lowpan_802154_neigh *n;
struct wpan_dev *wpan_dev;
int addr_space = 0;
if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
return 0;
switch (icmp6_type) {
case NDISC_REDIRECT:
n = lowpan_802154_neigh(neighbour_priv(neigh));
read_lock_bh(&neigh->lock);
if (lowpan_802154_is_valid_src_short_addr(n->short_addr)) {
memcpy(ha_buf, &n->short_addr,
IEEE802154_SHORT_ADDR_LEN);
read_unlock_bh(&neigh->lock);
addr_space += __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
*ha = ha_buf;
} else {
read_unlock_bh(&neigh->lock);
}
break;
case NDISC_NEIGHBOUR_ADVERTISEMENT:
case NDISC_NEIGHBOUR_SOLICITATION:
case NDISC_ROUTER_SOLICITATION:
wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr))
addr_space = __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
break;
default:
break;
}
return addr_space;
}
static void lowpan_ndisc_fill_addr_option(const struct net_device *dev,
struct sk_buff *skb, u8 icmp6_type,
const u8 *ha)
{
struct wpan_dev *wpan_dev;
__be16 short_addr;
u8 opt_type;
if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
return;
switch (icmp6_type) {
case NDISC_REDIRECT:
if (ha) {
ieee802154_le16_to_be16(&short_addr, ha);
__ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
&short_addr,
IEEE802154_SHORT_ADDR_LEN, 0);
}
return;
case NDISC_NEIGHBOUR_ADVERTISEMENT:
opt_type = ND_OPT_TARGET_LL_ADDR;
break;
case NDISC_ROUTER_SOLICITATION:
case NDISC_NEIGHBOUR_SOLICITATION:
opt_type = ND_OPT_SOURCE_LL_ADDR;
break;
default:
return;
}
wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
ieee802154_le16_to_be16(&short_addr,
&wpan_dev->short_addr);
__ndisc_fill_addr_option(skb, opt_type, &short_addr,
IEEE802154_SHORT_ADDR_LEN, 0);
}
}
static void lowpan_ndisc_prefix_rcv_add_addr(struct net *net,
struct net_device *dev,
const struct prefix_info *pinfo,
struct inet6_dev *in6_dev,
struct in6_addr *addr,
int addr_type, u32 addr_flags,
bool sllao, bool tokenized,
__u32 valid_lft,
u32 prefered_lft,
bool dev_addr_generated)
{
int err;
/* generates short based address for RA PIO's */
if (lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154) && dev_addr_generated &&
!addrconf_ifid_802154_6lowpan(addr->s6_addr + 8, dev)) {
err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
addr, addr_type, addr_flags,
sllao, tokenized, valid_lft,
prefered_lft);
if (err)
ND_PRINTK(2, warn,
"RA: could not add a short address based address for prefix: %pI6c\n",
&pinfo->prefix);
}
}
#endif
const struct ndisc_ops lowpan_ndisc_ops = {
.is_useropt = lowpan_ndisc_is_useropt,
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
.parse_options = lowpan_ndisc_parse_options,
.update = lowpan_ndisc_update,
.opt_addr_space = lowpan_ndisc_opt_addr_space,
.fill_addr_option = lowpan_ndisc_fill_addr_option,
.prefix_rcv_add_addr = lowpan_ndisc_prefix_rcv_add_addr,
#endif
};
| linux-master | net/6lowpan/ndisc.c |
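The NDISC_802154_SHORT_ADDR_LENGTH == 1 check in ndisc.c above relies on ND options being sized in 8-octet units: type (1 byte) + length (1 byte) + a 2-byte short address pads up to exactly one unit, while an EUI-64 needs two. The sketch below shows that arithmetic stand-alone, under the assumption of the usual 8-octet option granularity, rather than by calling the kernel's __ndisc_opt_addr_space() helper.
#include <stdio.h>
/* ND options are sized in units of 8 octets: 1 byte type, 1 byte length
 * (in 8-octet units), the link-layer address, then padding.  This is why
 * NDISC_802154_SHORT_ADDR_LENGTH above is 1 for a 2-byte short address.
 */
static unsigned int nd_opt_bytes(unsigned int addr_len, unsigned int pad)
{
        return ((2 + addr_len + pad + 7) / 8) * 8;
}
int main(void)
{
        printf("short addr (2 bytes): %u bytes on the wire -> nd_opt_len %u\n",
               nd_opt_bytes(2, 0), nd_opt_bytes(2, 0) / 8);
        printf("EUI-64 (8 bytes):     %u bytes on the wire -> nd_opt_len %u\n",
               nd_opt_bytes(8, 0), nd_opt_bytes(8, 0) / 8);
        return 0;
}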
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Hop-by-Hop Options Header compression according to RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_HOP_ID_0 0xe0
#define LOWPAN_NHC_HOP_MASK_0 0xfe
LOWPAN_NHC(nhc_hop, "RFC6282 Hop-by-Hop Options", NEXTHDR_HOP, 0,
LOWPAN_NHC_HOP_ID_0, LOWPAN_NHC_HOP_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_hop);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Hop-by-Hop Options compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_hop.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN Extension Header compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_HOP_ID_0 0xb0
#define LOWPAN_GHC_EXT_HOP_MASK_0 0xfe
LOWPAN_NHC(ghc_ext_hop, "RFC7400 Hop-by-Hop Extension Header", NEXTHDR_HOP, 0,
LOWPAN_GHC_EXT_HOP_ID_0, LOWPAN_GHC_EXT_HOP_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_hop);
MODULE_DESCRIPTION("6LoWPAN generic header hop-by-hop extension compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_ext_hop.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* (C) 2015 Pengutronix, Alexander Aring <[email protected]>
*/
#include <linux/if_arp.h>
#include <linux/module.h>
#include <net/6lowpan.h>
#include <net/addrconf.h>
#include "6lowpan_i.h"
int lowpan_register_netdevice(struct net_device *dev,
enum lowpan_lltypes lltype)
{
int i, ret;
switch (lltype) {
case LOWPAN_LLTYPE_IEEE802154:
dev->addr_len = EUI64_ADDR_LEN;
break;
case LOWPAN_LLTYPE_BTLE:
dev->addr_len = ETH_ALEN;
break;
}
dev->type = ARPHRD_6LOWPAN;
dev->mtu = IPV6_MIN_MTU;
lowpan_dev(dev)->lltype = lltype;
spin_lock_init(&lowpan_dev(dev)->ctx.lock);
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
lowpan_dev(dev)->ctx.table[i].id = i;
dev->ndisc_ops = &lowpan_ndisc_ops;
ret = register_netdevice(dev);
if (ret < 0)
return ret;
lowpan_dev_debugfs_init(dev);
return ret;
}
EXPORT_SYMBOL(lowpan_register_netdevice);
int lowpan_register_netdev(struct net_device *dev,
enum lowpan_lltypes lltype)
{
int ret;
rtnl_lock();
ret = lowpan_register_netdevice(dev, lltype);
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(lowpan_register_netdev);
void lowpan_unregister_netdevice(struct net_device *dev)
{
unregister_netdevice(dev);
lowpan_dev_debugfs_exit(dev);
}
EXPORT_SYMBOL(lowpan_unregister_netdevice);
void lowpan_unregister_netdev(struct net_device *dev)
{
rtnl_lock();
lowpan_unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(lowpan_unregister_netdev);
int addrconf_ifid_802154_6lowpan(u8 *eui, struct net_device *dev)
{
struct wpan_dev *wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;
/* Only set up short_addr-based autoconfiguration if a short_addr is present */
if (!lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr))
return -1;
/* For either address format, all zero addresses MUST NOT be used */
if (wpan_dev->pan_id == cpu_to_le16(0x0000) &&
wpan_dev->short_addr == cpu_to_le16(0x0000))
return -1;
/* Alternatively, if no PAN ID is known, 16 zero bits may be used */
if (wpan_dev->pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
memset(eui, 0, 2);
else
ieee802154_le16_to_be16(eui, &wpan_dev->pan_id);
/* The "Universal/Local" (U/L) bit shall be set to zero */
eui[0] &= ~2;
eui[2] = 0;
eui[3] = 0xFF;
eui[4] = 0xFE;
eui[5] = 0;
ieee802154_le16_to_be16(&eui[6], &wpan_dev->short_addr);
return 0;
}
static int lowpan_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct inet6_dev *idev;
struct in6_addr addr;
int i;
if (dev->type != ARPHRD_6LOWPAN)
return NOTIFY_DONE;
idev = __in6_dev_get(dev);
if (!idev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE:
/* 802.15.4 6LoWPAN short address SLAAC handling */
if (lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154) &&
addrconf_ifid_802154_6lowpan(addr.s6_addr + 8, dev) == 0) {
__ipv6_addr_set_half(&addr.s6_addr32[0],
htonl(0xFE800000), 0);
addrconf_add_linklocal(idev, &addr, 0);
}
break;
case NETDEV_DOWN:
for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
&lowpan_dev(dev)->ctx.table[i].flags);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block lowpan_notifier = {
.notifier_call = lowpan_event,
};
static int __init lowpan_module_init(void)
{
int ret;
lowpan_debugfs_init();
ret = register_netdevice_notifier(&lowpan_notifier);
if (ret < 0) {
lowpan_debugfs_exit();
return ret;
}
request_module_nowait("nhc_dest");
request_module_nowait("nhc_fragment");
request_module_nowait("nhc_hop");
request_module_nowait("nhc_ipv6");
request_module_nowait("nhc_mobility");
request_module_nowait("nhc_routing");
request_module_nowait("nhc_udp");
return 0;
}
static void __exit lowpan_module_exit(void)
{
lowpan_debugfs_exit();
unregister_netdevice_notifier(&lowpan_notifier);
}
module_init(lowpan_module_init);
module_exit(lowpan_module_exit);
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/core.c |
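addrconf_ifid_802154_6lowpan() in core.c builds a short-address-based interface identifier out of the PAN ID, the 0x00FF:FE00 filler and the 16-bit short address, clearing the Universal/Local bit. The user-space sketch below reproduces the same byte layout; it takes plain host-order values for illustration, whereas the kernel operates on __le16 wire values via ieee802154_le16_to_be16().
#include <stdint.h>
#include <stdio.h>
/* Build the 8-byte interface identifier the way the core code above does
 * for 802.15.4 short addresses: PAN ID (or zero when unknown), U/L bit
 * cleared, the 0x00FF:FE00 filler, then the 16-bit short address.
 */
static void short_addr_iid(uint8_t eui[8], uint16_t pan_id,
                           uint16_t short_addr, int pan_id_known)
{
        if (pan_id_known) {
                eui[0] = pan_id >> 8;
                eui[1] = pan_id & 0xff;
        } else {
                eui[0] = 0;
                eui[1] = 0;
        }
        eui[0] &= ~0x02;        /* the U/L bit shall be zero */
        eui[2] = 0;
        eui[3] = 0xff;
        eui[4] = 0xfe;
        eui[5] = 0;
        eui[6] = short_addr >> 8;
        eui[7] = short_addr & 0xff;
}
int main(void)
{
        uint8_t eui[8];
        int i;
        short_addr_iid(eui, 0x1234, 0xabcd, 1);
        for (i = 0; i < 8; i++)
                printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
        return 0;
}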
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN UDP compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_UDP_ID_0 0xd0
#define LOWPAN_GHC_UDP_MASK_0 0xf8
LOWPAN_NHC(ghc_udp, "RFC7400 UDP", NEXTHDR_UDP, 0,
LOWPAN_GHC_UDP_ID_0, LOWPAN_GHC_UDP_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_udp);
MODULE_DESCRIPTION("6LoWPAN generic header UDP compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_udp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN Extension Header compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_FRAG_ID_0 0xb4
#define LOWPAN_GHC_EXT_FRAG_MASK_0 0xfe
LOWPAN_NHC(ghc_ext_frag, "RFC7400 Fragmentation Extension Header",
NEXTHDR_FRAGMENT, 0, LOWPAN_GHC_EXT_FRAG_ID_0,
LOWPAN_GHC_EXT_FRAG_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_frag);
MODULE_DESCRIPTION("6LoWPAN generic header fragmentation extension compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_ext_frag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 UDP compression according to RFC6282
*
* Authors:
* Alexander Aring <[email protected]>
*
* Original written by:
* Alexander Smirnov <[email protected]>
* Jon Smirl <[email protected]>
*/
#include "nhc.h"
#define LOWPAN_NHC_UDP_MASK 0xF8
#define LOWPAN_NHC_UDP_ID 0xF0
#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
/* all inline */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0
/* source 16bit inline, dest = 0xF0 + 8 bit inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1
/* source = 0xF0 + 8bit inline, dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_10 0xF2
/* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3
/* checksum elided */
#define LOWPAN_NHC_UDP_CS_C 0x04
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
u8 tmp = 0, val = 0;
struct udphdr uh;
bool fail;
int err;
fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
break;
case LOWPAN_NHC_UDP_CS_P_01:
fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
break;
case LOWPAN_NHC_UDP_CS_P_10:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
break;
case LOWPAN_NHC_UDP_CS_P_11:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
break;
default:
BUG();
}
pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
ntohs(uh.source), ntohs(uh.dest));
/* checksum */
if (tmp & LOWPAN_NHC_UDP_CS_C) {
pr_debug_ratelimited("checksum elided currently not supported\n");
fail = true;
} else {
fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
}
if (fail)
return -EINVAL;
/* UDP length needs to be inferred from the lower layers;
 * here, we obtain the hint from the remaining size of the
 * frame
 */
switch (lowpan_dev(skb->dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
if (lowpan_802154_cb(skb)->d_size)
uh.len = htons(lowpan_802154_cb(skb)->d_size -
sizeof(struct ipv6hdr));
else
uh.len = htons(skb->len + sizeof(struct udphdr));
break;
default:
uh.len = htons(skb->len + sizeof(struct udphdr));
break;
}
pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
/* replace the compressed UDP header with the uncompressed UDP
 * header
 */
err = skb_cow(skb, needed);
if (unlikely(err))
return err;
skb_push(skb, sizeof(struct udphdr));
skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
return 0;
}
static int udp_compress(struct sk_buff *skb, u8 **hc_ptr)
{
const struct udphdr *uh = udp_hdr(skb);
u8 tmp;
if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT) &&
((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT)) {
pr_debug("UDP header: both ports compression to 4 bits\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_11;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source and destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of dest\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_01;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of source\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_10;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* destination port */
lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
} else {
pr_debug("UDP header: can't compress\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_00;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
}
/* checksum is always inline */
lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
return 0;
}
LOWPAN_NHC(nhc_udp, "RFC6282 UDP", NEXTHDR_UDP, sizeof(struct udphdr),
LOWPAN_NHC_UDP_ID, LOWPAN_NHC_UDP_MASK, udp_uncompress, udp_compress);
module_lowpan_nhc(nhc_udp);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 UDP compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_udp.c |
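When both UDP ports fall into the 0xF0B0..0xF0BF well-known range, nhc_udp.c packs them into a single octet (the LOWPAN_NHC_UDP_CS_P_11 case). The small round-trip sketch below shows that encode/decode arithmetic using host-order port values instead of the kernel's __be16 fields.
#include <stdint.h>
#include <stdio.h>
#define UDP_4BIT_PORT 0xf0b0
#define UDP_4BIT_MASK 0xfff0
/* Encode both UDP ports into one byte when they fall in the
 * 0xf0b0..0xf0bf range, mirroring the LOWPAN_NHC_UDP_CS_P_11 case.
 */
static int encode_ports_4bit(uint16_t src, uint16_t dst, uint8_t *out)
{
        if ((src & UDP_4BIT_MASK) != UDP_4BIT_PORT ||
            (dst & UDP_4BIT_MASK) != UDP_4BIT_PORT)
                return -1;      /* not compressible to 4 + 4 bits */
        *out = (uint8_t)(((src - UDP_4BIT_PORT) << 4) | (dst - UDP_4BIT_PORT));
        return 0;
}
static void decode_ports_4bit(uint8_t in, uint16_t *src, uint16_t *dst)
{
        *src = UDP_4BIT_PORT + (in >> 4);
        *dst = UDP_4BIT_PORT + (in & 0x0f);
}
int main(void)
{
        uint16_t src, dst;
        uint8_t byte;
        if (!encode_ports_4bit(0xf0b3, 0xf0bc, &byte)) {
                decode_ports_4bit(byte, &src, &dst);
                printf("byte 0x%02x -> src 0x%04x dst 0x%04x\n", byte, src, dst);
        }
        return 0;
}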
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN Extension Header compression according to RFC7400
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_DEST_ID_0 0xb6
#define LOWPAN_GHC_EXT_DEST_MASK_0 0xfe
LOWPAN_NHC(ghc_ext_dest, "RFC7400 Destination Extension Header", NEXTHDR_DEST,
0, LOWPAN_GHC_EXT_DEST_ID_0, LOWPAN_GHC_EXT_DEST_MASK_0, NULL, NULL);
module_lowpan_nhc(ghc_ext_dest);
MODULE_DESCRIPTION("6LoWPAN generic header destination extension compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ghc_ext_dest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Destination Options Header compression according to
* RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_DEST_ID_0 0xe6
#define LOWPAN_NHC_DEST_MASK_0 0xfe
LOWPAN_NHC(nhc_dest, "RFC6282 Destination Options", NEXTHDR_DEST, 0,
LOWPAN_NHC_DEST_ID_0, LOWPAN_NHC_DEST_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_dest);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Destination Options compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_dest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN IPv6 Header compression according to RFC6282
*/
#include "nhc.h"
#define LOWPAN_NHC_IPV6_ID_0 0xee
#define LOWPAN_NHC_IPV6_MASK_0 0xfe
LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, LOWPAN_NHC_IPV6_ID_0,
LOWPAN_NHC_IPV6_MASK_0, NULL, NULL);
module_lowpan_nhc(nhc_ipv6);
MODULE_DESCRIPTION("6LoWPAN next header RFC6282 IPv6 compression");
MODULE_LICENSE("GPL");
| linux-master | net/6lowpan/nhc_ipv6.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 6LoWPAN next header compression
*
* Authors:
* Alexander Aring <[email protected]>
*/
#include <linux/netdevice.h>
#include <net/ipv6.h>
#include "nhc.h"
static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
static DEFINE_SPINLOCK(lowpan_nhc_lock);
static const struct lowpan_nhc *lowpan_nhc_by_nhcid(struct sk_buff *skb)
{
const struct lowpan_nhc *nhc;
int i;
u8 id;
if (!pskb_may_pull(skb, 1))
return NULL;
id = *skb->data;
for (i = 0; i < NEXTHDR_MAX + 1; i++) {
nhc = lowpan_nexthdr_nhcs[i];
if (!nhc)
continue;
if ((id & nhc->idmask) == nhc->id)
return nhc;
}
return NULL;
}
int lowpan_nhc_check_compression(struct sk_buff *skb,
const struct ipv6hdr *hdr, u8 **hc_ptr)
{
const struct lowpan_nhc *nhc;
int ret = 0;
spin_lock_bh(&lowpan_nhc_lock);
nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
if (!(nhc && nhc->compress))
ret = -ENOENT;
spin_unlock_bh(&lowpan_nhc_lock);
return ret;
}
int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
u8 **hc_ptr)
{
int ret;
const struct lowpan_nhc *nhc;
spin_lock_bh(&lowpan_nhc_lock);
nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
/* check if the nhc module was removed while we were in the unlocked part.
 * TODO: this is a workaround; we should prevent unloading of nhc
 * modules during the unlocked part. If it happens, the lowpan packet
 * is always dropped, but that is very unlikely.
 *
 * A clean solution isn't easy because we need to decide at
 * lowpan_nhc_check_compression whether we do a compression or not,
 * and because of the inline data that is already added to the skb we
 * can't move this handling there.
 */
if (unlikely(!nhc || !nhc->compress)) {
ret = -EINVAL;
goto out;
}
/* In the case of RAW sockets the transport header is not set by
* the ip6 stack so we must set it ourselves
*/
if (skb->transport_header == skb->network_header)
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
ret = nhc->compress(skb, hc_ptr);
if (ret < 0)
goto out;
/* skip the transport header */
skb_pull(skb, nhc->nexthdrlen);
out:
spin_unlock_bh(&lowpan_nhc_lock);
return ret;
}
int lowpan_nhc_do_uncompression(struct sk_buff *skb,
const struct net_device *dev,
struct ipv6hdr *hdr)
{
const struct lowpan_nhc *nhc;
int ret;
spin_lock_bh(&lowpan_nhc_lock);
nhc = lowpan_nhc_by_nhcid(skb);
if (nhc) {
if (nhc->uncompress) {
ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
nhc->nexthdrlen);
if (ret < 0) {
spin_unlock_bh(&lowpan_nhc_lock);
return ret;
}
} else {
spin_unlock_bh(&lowpan_nhc_lock);
netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
nhc->name);
return -ENOTSUPP;
}
} else {
spin_unlock_bh(&lowpan_nhc_lock);
netdev_warn(dev, "received unknown nhc id which was not found.\n");
return -ENOENT;
}
hdr->nexthdr = nhc->nexthdr;
skb_reset_transport_header(skb);
raw_dump_table(__func__, "raw transport header dump",
skb_transport_header(skb), nhc->nexthdrlen);
spin_unlock_bh(&lowpan_nhc_lock);
return 0;
}
int lowpan_nhc_add(const struct lowpan_nhc *nhc)
{
int ret = 0;
spin_lock_bh(&lowpan_nhc_lock);
if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
ret = -EEXIST;
goto out;
}
lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
out:
spin_unlock_bh(&lowpan_nhc_lock);
return ret;
}
EXPORT_SYMBOL(lowpan_nhc_add);
void lowpan_nhc_del(const struct lowpan_nhc *nhc)
{
spin_lock_bh(&lowpan_nhc_lock);
lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;
spin_unlock_bh(&lowpan_nhc_lock);
synchronize_net();
}
EXPORT_SYMBOL(lowpan_nhc_del);
| linux-master | net/6lowpan/nhc.c |
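lowpan_nhc_by_nhcid() above dispatches on the first octet of the compressed next header by checking (octet & nhc->idmask) == nhc->id against the registered compressors. The stand-alone sketch below models that id/mask lookup, seeded with the RFC6282 id/mask pairs defined by the nhc_* modules in this directory.
#include <stdint.h>
#include <stdio.h>
/* Tiny stand-alone model of the id/idmask dispatch used by
 * lowpan_nhc_by_nhcid(): a first octet matches a compressor when
 * (octet & idmask) == id.  The values below come from the nhc_*
 * modules above (nhc_udp, nhc_hop, nhc_dest, nhc_ipv6).
 */
struct nhc_entry {
        const char *name;
        uint8_t id;
        uint8_t idmask;
};
static const struct nhc_entry table[] = {
        { "RFC6282 UDP",                 0xf0, 0xf8 },
        { "RFC6282 Hop-by-Hop Options",  0xe0, 0xfe },
        { "RFC6282 Destination Options", 0xe6, 0xfe },
        { "RFC6282 IPv6",                0xee, 0xfe },
};
static const char *nhc_lookup(uint8_t first_octet)
{
        unsigned int i;
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if ((first_octet & table[i].idmask) == table[i].id)
                        return table[i].name;
        return "unknown";
}
int main(void)
{
        printf("0xf3 -> %s\n", nhc_lookup(0xf3)); /* UDP, 4+4 bit ports */
        printf("0xe1 -> %s\n", nhc_lookup(0xe1)); /* Hop-by-Hop Options */
        return 0;
}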
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#ifdef CONFIG_PROC_FS
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/ktime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
#include <linux/gfp.h>
#include "mpc.h"
#include "mpoa_caches.h"
/*
* mpoa_proc.c: Implementation of the MPOA client's proc
* file system statistics
*/
#if 1
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
static int proc_mpc_open(struct inode *inode, struct file *file);
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
static int parse_qos(const char *buff);
static const struct proc_ops mpc_proc_ops = {
.proc_open = proc_mpc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = proc_mpc_write,
.proc_release = seq_release,
};
/*
* Returns the state of an ingress cache entry as a string
*/
static const char *ingress_state_string(int state)
{
switch (state) {
case INGRESS_RESOLVING:
return "resolving ";
case INGRESS_RESOLVED:
return "resolved ";
case INGRESS_INVALID:
return "invalid ";
case INGRESS_REFRESHING:
return "refreshing ";
}
return "";
}
/*
* Returns the state of an egress cache entry as a string
*/
static const char *egress_state_string(int state)
{
switch (state) {
case EGRESS_RESOLVED:
return "resolved ";
case EGRESS_PURGE:
return "purge ";
case EGRESS_INVALID:
return "invalid ";
}
return "";
}
/*
* FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
*/
static void *mpc_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct mpoa_client *mpc;
if (!l--)
return SEQ_START_TOKEN;
for (mpc = mpcs; mpc; mpc = mpc->next)
if (!l--)
return mpc;
return NULL;
}
static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
{
struct mpoa_client *p = v;
(*pos)++;
return v == SEQ_START_TOKEN ? mpcs : p->next;
}
static void mpc_stop(struct seq_file *m, void *v)
{
}
/*
* READING function - called when the /proc/atm/mpc file is read from.
*/
static int mpc_show(struct seq_file *m, void *v)
{
struct mpoa_client *mpc = v;
int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
time64_t now;
unsigned char ip_string[16];
if (v == SEQ_START_TOKEN) {
atm_mpoa_disp_qos(m);
return 0;
}
seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
now = ktime_get_seconds();
for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
unsigned long seconds_delta = now - in_entry->time;
sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
seq_printf(m, "%-16s%s%-14lu%-12u",
ip_string,
ingress_state_string(in_entry->entry_state),
in_entry->ctrl_info.holding_time -
seconds_delta,
in_entry->packets_fwded);
if (in_entry->shortcut)
seq_printf(m, " %-3d %-3d",
in_entry->shortcut->vpi,
in_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
unsigned long seconds_delta = now - eg_entry->time;
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(m, "%02x", p[i]);
seq_printf(m, "\n%-16lu%s%-14lu%-15u",
(unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
egress_state_string(eg_entry->entry_state),
(eg_entry->ctrl_info.holding_time - seconds_delta),
eg_entry->packets_rcvd);
/* latest IP address */
sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
seq_printf(m, "%-16s", ip_string);
if (eg_entry->shortcut)
seq_printf(m, " %-3d %-3d",
eg_entry->shortcut->vpi,
eg_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
return 0;
}
static const struct seq_operations mpc_op = {
.start = mpc_start,
.next = mpc_next,
.stop = mpc_stop,
.show = mpc_show
};
static int proc_mpc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &mpc_op);
}
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
char *page, *p;
unsigned int len;
if (nbytes == 0)
return 0;
if (nbytes >= PAGE_SIZE)
nbytes = PAGE_SIZE-1;
page = (char *)__get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
for (p = page, len = 0; len < nbytes; p++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
len += 1;
if (*p == '\0' || *p == '\n')
break;
}
*p = '\0';
if (!parse_qos(page))
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
return len;
}
static int parse_qos(const char *buff)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
unsigned char ip[4];
int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
__be32 ipaddr;
struct atm_qos qos;
memset(&qos, 0, sizeof(struct atm_qos));
if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
ip, ip+1, ip+2, ip+3) == 4) {
ipaddr = *(__be32 *)ip;
return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
}
if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
rx_pcr = tx_pcr;
rx_sdu = tx_sdu;
} else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
return 0;
ipaddr = *(__be32 *)ip;
qos.txtp.traffic_class = ATM_CBR;
qos.txtp.max_pcr = tx_pcr;
qos.txtp.max_sdu = tx_sdu;
qos.rxtp.traffic_class = ATM_CBR;
qos.rxtp.max_pcr = rx_pcr;
qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
dprintk("parse_qos(): setting qos parameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr, qos.txtp.max_sdu,
qos.rxtp.max_pcr, qos.rxtp.max_sdu);
atm_mpoa_add_qos(ipaddr, &qos);
return 1;
}
/*
* INITIALIZATION function - called when module is initialized/loaded.
*/
int mpc_proc_init(void)
{
struct proc_dir_entry *p;
p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_proc_ops);
if (!p) {
pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
return -ENOMEM;
}
return 0;
}
/*
* DELETING function - called when module is removed.
*/
void mpc_proc_clean(void)
{
remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
}
#endif /* CONFIG_PROC_FS */
| linux-master | net/atm/mpoa_proc.c |
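parse_qos() above accepts lines of the form "add a.b.c.d tx=pcr,sdu rx=pcr,sdu" (or "del a.b.c.d") written to /proc/atm/mpc. Below is a hypothetical user-space helper that formats and writes such a line; it is purely illustrative, not part of the kernel tree, and the IP address and QoS numbers are made-up example values.
#include <stdio.h>
/* Format a control line in the shape parse_qos() accepts and write it
 * to /proc/atm/mpc.  Requires the mpoa module to be loaded and
 * sufficient permissions; error handling is kept minimal on purpose.
 */
static int mpc_add_qos(const char *ip, int tx_pcr, int tx_sdu,
                       int rx_pcr, int rx_sdu)
{
        FILE *f = fopen("/proc/atm/mpc", "w");
        if (!f)
                return -1;
        fprintf(f, "add %s tx=%d,%d rx=%d,%d\n",
                ip, tx_pcr, tx_sdu, rx_pcr, rx_sdu);
        return fclose(f);
}
int main(void)
{
        /* mirrors the example line in the parse_qos() comment */
        return mpc_add_qos("130.230.54.142", 100000, 1500, 100000, 1500) ? 1 : 0;
}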
// SPDX-License-Identifier: GPL-2.0-only
/* net/atm/clip.c - RFC1577 Classical IP over ATM */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h> /* for UINT_MAX */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/if_arp.h> /* for some manifest constants */
#include <linux/notifier.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/atmarp.h>
#include <linux/capability.h>
#include <linux/ip.h> /* for net/route.h */
#include <linux/in.h> /* for struct sockaddr_in */
#include <linux/if.h> /* for IFF_UP */
#include <linux/inetdevice.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */
#include <net/arp.h>
#include <linux/param.h> /* for HZ */
#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
#include <linux/atomic.h>
#include "common.h"
#include "resources.h"
#include <net/atmclip.h>
static struct net_device *clip_devs;
static struct atm_vcc *atmarpd;
static struct timer_list idle_timer;
static const struct neigh_ops clip_neigh_ops;
static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
{
struct sock *sk;
struct atmarp_ctrl *ctrl;
struct sk_buff *skb;
pr_debug("(%d)\n", type);
if (!atmarpd)
return -EUNATCH;
skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
atm_force_charge(atmarpd, skb->truesize);
sk = sk_atm(atmarpd);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return 0;
}
static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
{
pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
clip_vcc->entry = entry;
clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
clip_vcc->next = entry->vccs;
entry->vccs = clip_vcc;
entry->neigh->used = jiffies;
}
static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
{
struct atmarp_entry *entry = clip_vcc->entry;
struct clip_vcc **walk;
if (!entry) {
pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
entry->neigh->used = jiffies;
for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
if (*walk == clip_vcc) {
int error;
*walk = clip_vcc->next; /* atomic */
clip_vcc->entry = NULL;
if (clip_vcc->xoff)
netif_wake_queue(entry->neigh->dev);
if (entry->vccs)
goto out;
entry->expires = jiffies - 1;
/* force resolution or expiration */
error = neigh_update(entry->neigh, NULL, NUD_NONE,
NEIGH_UPDATE_F_ADMIN, 0);
if (error)
pr_err("neigh_update failed with %d\n", error);
goto out;
}
pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
out:
netif_tx_unlock_bh(entry->neigh->dev);
}
/* The neighbour entry n->lock is held. */
static int neigh_check_cb(struct neighbour *n)
{
struct atmarp_entry *entry = neighbour_priv(n);
struct clip_vcc *cv;
if (n->ops != &clip_neigh_ops)
return 0;
for (cv = entry->vccs; cv; cv = cv->next) {
unsigned long exp = cv->last_use + cv->idle_timeout;
if (cv->idle_timeout && time_after(jiffies, exp)) {
pr_debug("releasing vcc %p->%p of entry %p\n",
cv, cv->vcc, entry);
vcc_release_async(cv->vcc, -ETIMEDOUT);
}
}
if (entry->vccs || time_before(jiffies, entry->expires))
return 0;
if (refcount_read(&n->refcnt) > 1) {
struct sk_buff *skb;
pr_debug("destruction postponed with ref %d\n",
refcount_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb);
return 0;
}
pr_debug("expired neigh %p\n", n);
return 1;
}
static void idle_timer_check(struct timer_list *unused)
{
write_lock(&arp_tbl.lock);
__neigh_for_each_release(&arp_tbl, neigh_check_cb);
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
write_unlock(&arp_tbl.lock);
}
static int clip_arp_rcv(struct sk_buff *skb)
{
struct atm_vcc *vcc;
pr_debug("\n");
vcc = ATM_SKB(skb)->vcc;
if (!vcc || !atm_charge(vcc, skb->truesize)) {
dev_kfree_skb_any(skb);
return 0;
}
pr_debug("pushing to %p\n", vcc);
pr_debug("using %p\n", CLIP_VCC(vcc)->old_push);
CLIP_VCC(vcc)->old_push(vcc, skb);
return 0;
}
static const unsigned char llc_oui[] = {
0xaa, /* DSAP: non-ISO */
0xaa, /* SSAP: non-ISO */
0x03, /* Ctrl: Unnumbered Information Command PDU */
0x00, /* OUI: EtherType */
0x00,
0x00
};
static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
pr_debug("\n");
if (!clip_devs) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
return;
}
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
unlink_clip_vcc(clip_vcc);
clip_vcc->old_push(vcc, NULL); /* pass on the bad news */
kfree(clip_vcc);
return;
}
atm_return(vcc, skb->truesize);
skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
/* clip_vcc->entry == NULL if we don't have an IP address yet */
if (!skb->dev) {
dev_kfree_skb_any(skb);
return;
}
ATM_SKB(skb)->vcc = vcc;
skb_reset_mac_header(skb);
if (!clip_vcc->encap ||
skb->len < RFC1483LLC_LEN ||
memcmp(skb->data, llc_oui, sizeof(llc_oui)))
skb->protocol = htons(ETH_P_IP);
else {
skb->protocol = ((__be16 *)skb->data)[3];
skb_pull(skb, RFC1483LLC_LEN);
if (skb->protocol == htons(ETH_P_ARP)) {
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
clip_arp_rcv(skb);
return;
}
}
clip_vcc->last_use = jiffies;
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
}
/*
* Note: these spinlocks _must_not_ block on non-SMP. The only goal is that
* clip_pop is atomic with respect to the critical section in clip_start_xmit.
*/
static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
struct net_device *dev = skb->dev;
int old;
unsigned long flags;
pr_debug("(vcc %p)\n", vcc);
clip_vcc->old_pop(vcc, skb);
/* skb->dev == NULL in outbound ARP packets */
if (!dev)
return;
spin_lock_irqsave(&PRIV(dev)->xoff_lock, flags);
if (atm_may_send(vcc, 0)) {
old = xchg(&clip_vcc->xoff, 0);
if (old)
netif_wake_queue(dev);
}
spin_unlock_irqrestore(&PRIV(dev)->xoff_lock, flags);
}
static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 *ip = (__be32 *) neigh->primary_key;
pr_debug("(neigh %p, skb %p)\n", neigh, skb);
to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
}
static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
{
#ifndef CONFIG_ATM_CLIP_NO_ICMP
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
#endif
kfree_skb(skb);
}
static const struct neigh_ops clip_neigh_ops = {
.family = AF_INET,
.solicit = clip_neigh_solicit,
.error_report = clip_neigh_error,
.output = neigh_direct_output,
.connected_output = neigh_direct_output,
};
static int clip_constructor(struct net_device *dev, struct neighbour *neigh)
{
struct atmarp_entry *entry = neighbour_priv(neigh);
if (neigh->tbl->family != AF_INET)
return -EINVAL;
if (neigh->type != RTN_UNICAST)
return -EINVAL;
neigh->nud_state = NUD_NONE;
neigh->ops = &clip_neigh_ops;
neigh->output = neigh->ops->output;
entry->neigh = neigh;
entry->vccs = NULL;
entry->expires = jiffies - 1;
return 0;
}
/* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
/*
* We play with the resolve flag: 0 and 1 have the usual meaning, but -1 means
* to allocate the neighbour entry but not to ask atmarpd for resolution. Also,
* don't increment the usage count. This is used to create entries in
* clip_setentry.
*/
static int clip_encap(struct atm_vcc *vcc, int mode)
{
if (!CLIP_VCC(vcc))
return -EBADFD;
CLIP_VCC(vcc)->encap = mode;
return 0;
}
static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct clip_priv *clip_priv = PRIV(dev);
struct dst_entry *dst = skb_dst(skb);
struct atmarp_entry *entry;
struct neighbour *n;
struct atm_vcc *vcc;
struct rtable *rt;
__be32 *daddr;
int old;
unsigned long flags;
pr_debug("(skb %p)\n", skb);
if (!dst) {
pr_err("skb_dst(skb) == NULL\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
rt = (struct rtable *) dst;
if (rt->rt_gw_family == AF_INET)
daddr = &rt->rt_gw4;
else
daddr = &ip_hdr(skb)->daddr;
n = dst_neigh_lookup(dst, daddr);
if (!n) {
pr_err("NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
entry = neighbour_priv(n);
if (!entry->vccs) {
if (time_after(jiffies, entry->expires)) {
/* should be resolved */
entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
}
if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
skb_queue_tail(&entry->neigh->arp_queue, skb);
else {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
goto out_release_neigh;
}
pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
pr_debug("using neighbour %p, vcc %p\n", n, vcc);
if (entry->vccs->encap) {
void *here;
here = skb_push(skb, RFC1483LLC_LEN);
memcpy(here, llc_oui, sizeof(llc_oui));
((__be16 *) here)[3] = skb->protocol;
}
atm_account_tx(vcc, skb);
entry->vccs->last_use = jiffies;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
pr_warn("XOFF->XOFF transition\n");
goto out_release_neigh;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
vcc->send(vcc, skb);
if (atm_may_send(vcc, 0)) {
entry->vccs->xoff = 0;
goto out_release_neigh;
}
spin_lock_irqsave(&clip_priv->xoff_lock, flags);
netif_stop_queue(dev); /* XOFF -> throttle immediately */
barrier();
if (!entry->vccs->xoff)
netif_start_queue(dev);
/* Oh, we just raced with clip_pop. netif_start_queue should be
good enough, because nothing should really be asleep because
of the brief netif_stop_queue. If this isn't true or if it
changes, use netif_wake_queue instead. */
spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
out_release_neigh:
neigh_release(n);
return NETDEV_TX_OK;
}
static int clip_mkip(struct atm_vcc *vcc, int timeout)
{
struct clip_vcc *clip_vcc;
if (!vcc->push)
return -EBADFD;
clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
if (!clip_vcc)
return -ENOMEM;
pr_debug("%p vcc %p\n", clip_vcc, vcc);
clip_vcc->vcc = vcc;
vcc->user_back = clip_vcc;
set_bit(ATM_VF_IS_CLIP, &vcc->flags);
clip_vcc->entry = NULL;
clip_vcc->xoff = 0;
clip_vcc->encap = 1;
clip_vcc->last_use = jiffies;
clip_vcc->idle_timeout = timeout * HZ;
clip_vcc->old_push = vcc->push;
clip_vcc->old_pop = vcc->pop;
vcc->push = clip_push;
vcc->pop = clip_pop;
/* re-process everything received between connection setup and MKIP */
vcc_process_recv_queue(vcc);
return 0;
}
static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
{
struct neighbour *neigh;
struct atmarp_entry *entry;
int error;
struct clip_vcc *clip_vcc;
struct rtable *rt;
if (vcc->push != clip_push) {
pr_warn("non-CLIP VCC\n");
return -EBADF;
}
clip_vcc = CLIP_VCC(vcc);
if (!ip) {
if (!clip_vcc->entry) {
pr_err("hiding hidden ATMARP entry\n");
return 0;
}
pr_debug("remove\n");
unlink_clip_vcc(clip_vcc);
return 0;
}
rt = ip_route_output(&init_net, ip, 0, 1, 0);
if (IS_ERR(rt))
return PTR_ERR(rt);
neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
ip_rt_put(rt);
if (!neigh)
return -ENOMEM;
entry = neighbour_priv(neigh);
if (entry != clip_vcc->entry) {
if (!clip_vcc->entry)
pr_debug("add\n");
else {
pr_debug("update\n");
unlink_clip_vcc(clip_vcc);
}
link_vcc(clip_vcc, entry);
}
error = neigh_update(neigh, llc_oui, NUD_PERMANENT,
NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
neigh_release(neigh);
return error;
}
static const struct net_device_ops clip_netdev_ops = {
.ndo_start_xmit = clip_start_xmit,
.ndo_neigh_construct = clip_constructor,
};
static void clip_setup(struct net_device *dev)
{
dev->netdev_ops = &clip_netdev_ops;
dev->type = ARPHRD_ATM;
dev->neigh_priv_len = sizeof(struct atmarp_entry);
dev->hard_header_len = RFC1483LLC_LEN;
dev->mtu = RFC1626_MTU;
dev->tx_queue_len = 100; /* "normal" queue (packets) */
/* When using a "real" qdisc, the qdisc determines the queue */
/* length. tx_queue_len is only used for the default case, */
/* without any more elaborate queuing. 100 is a reasonable */
/* compromise between decent burst-tolerance and protection */
/* against memory hogs. */
netif_keep_dst(dev);
}
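/*
 * Create and register a new "atm<N>" CLIP interface. A number of -1 means
 * "pick the next free unit"; the unit number actually used is returned on
 * success.
 */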
static int clip_create(int number)
{
struct net_device *dev;
struct clip_priv *clip_priv;
int error;
if (number != -1) {
for (dev = clip_devs; dev; dev = PRIV(dev)->next)
if (PRIV(dev)->number == number)
return -EEXIST;
} else {
number = 0;
for (dev = clip_devs; dev; dev = PRIV(dev)->next)
if (PRIV(dev)->number >= number)
number = PRIV(dev)->number + 1;
}
dev = alloc_netdev(sizeof(struct clip_priv), "", NET_NAME_UNKNOWN,
clip_setup);
if (!dev)
return -ENOMEM;
clip_priv = PRIV(dev);
sprintf(dev->name, "atm%d", number);
spin_lock_init(&clip_priv->xoff_lock);
clip_priv->number = number;
error = register_netdev(dev);
if (error) {
free_netdev(dev);
return error;
}
clip_priv->next = clip_devs;
clip_devs = dev;
pr_debug("registered (net:%s)\n", dev->name);
return number;
}
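/* Relay up/down/MTU and address changes of CLIP interfaces to atmarpd. */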
static int clip_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER)
return NOTIFY_DONE;
/* ignore non-CLIP devices */
if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
pr_debug("NETDEV_UP\n");
to_atmarpd(act_up, PRIV(dev)->number, 0);
break;
case NETDEV_GOING_DOWN:
pr_debug("NETDEV_DOWN\n");
to_atmarpd(act_down, PRIV(dev)->number, 0);
break;
case NETDEV_CHANGE:
case NETDEV_CHANGEMTU:
pr_debug("NETDEV_CHANGE*\n");
to_atmarpd(act_change, PRIV(dev)->number, 0);
break;
}
return NOTIFY_DONE;
}
static int clip_inet_event(struct notifier_block *this, unsigned long event,
void *ifa)
{
struct in_device *in_dev;
struct netdev_notifier_info info;
in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
/*
* Transitions are of the down-change-up type, so it's sufficient to
* handle the change on up.
*/
if (event != NETDEV_UP)
return NOTIFY_DONE;
netdev_notifier_info_init(&info, in_dev->dev);
return clip_device_event(this, NETDEV_CHANGE, &info);
}
static struct notifier_block clip_dev_notifier = {
.notifier_call = clip_device_event,
};
static struct notifier_block clip_inet_notifier = {
.notifier_call = clip_inet_event,
};
static void atmarpd_close(struct atm_vcc *vcc)
{
pr_debug("\n");
rtnl_lock();
atmarpd = NULL;
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
rtnl_unlock();
pr_debug("(done)\n");
module_put(THIS_MODULE);
}
static const struct atmdev_ops atmarpd_dev_ops = {
.close = atmarpd_close
};
static struct atm_dev atmarpd_dev = {
.ops = &atmarpd_dev_ops,
.type = "arpd",
.number = 999,
.lock = __SPIN_LOCK_UNLOCKED(atmarpd_dev.lock)
};
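/*
 * Attach the ATMARP daemon's control VCC (ATMARPD_CTRL). Only one daemon
 * may be registered at a time; the VCC is parked on the pseudo "arpd"
 * device and the idle timer is started.
 */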
static int atm_init_atmarp(struct atm_vcc *vcc)
{
rtnl_lock();
if (atmarpd) {
rtnl_unlock();
return -EADDRINUSE;
}
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
atmarpd = vcc;
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
/* allow replies and avoid getting closed if signaling dies */
vcc->dev = &atmarpd_dev;
vcc_insert_socket(sk_atm(vcc));
vcc->push = NULL;
vcc->pop = NULL; /* crash */
vcc->push_oam = NULL; /* crash */
rtnl_unlock();
return 0;
}
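/*
 * ioctl entry point for CLIP. Every command requires CAP_NET_ADMIN.
 * atmarpd would typically, for instance, issue something like
 * ioctl(sock_fd, ATMARP_MKIP, idle_timeout) on a connected VCC to turn it
 * into a CLIP data channel (illustrative call, not taken from this file).
 */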
static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct atm_vcc *vcc = ATM_SD(sock);
int err = 0;
switch (cmd) {
case SIOCMKCLIP:
case ATMARPD_CTRL:
case ATMARP_MKIP:
case ATMARP_SETENTRY:
case ATMARP_ENCAP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
break;
default:
return -ENOIOCTLCMD;
}
switch (cmd) {
case SIOCMKCLIP:
err = clip_create(arg);
break;
case ATMARPD_CTRL:
err = atm_init_atmarp(vcc);
if (!err) {
sock->state = SS_CONNECTED;
__module_get(THIS_MODULE);
}
break;
case ATMARP_MKIP:
err = clip_mkip(vcc, arg);
break;
case ATMARP_SETENTRY:
err = clip_setentry(vcc, (__force __be32)arg);
break;
case ATMARP_ENCAP:
err = clip_encap(vcc, arg);
break;
}
return err;
}
static struct atm_ioctl clip_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = clip_ioctl,
};
#ifdef CONFIG_PROC_FS
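/*
 * Pretty-print an ATM SVC address for /proc/net/atm/arp: the public
 * (E.164) part first, then the private NSAP part as dot-separated hex
 * groups, with the grouping chosen by the address's AFI.
 */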
static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
{
static int code[] = { 1, 2, 10, 6, 1, 0 };
static int e164[] = { 1, 8, 4, 6, 1, 0 };
if (*addr->sas_addr.pub) {
seq_printf(seq, "%s", addr->sas_addr.pub);
if (*addr->sas_addr.prv)
seq_putc(seq, '+');
} else if (!*addr->sas_addr.prv) {
seq_printf(seq, "%s", "(none)");
return;
}
if (*addr->sas_addr.prv) {
unsigned char *prv = addr->sas_addr.prv;
int *fields;
int i, j;
fields = *prv == ATM_AFI_E164 ? e164 : code;
for (i = 0; fields[i]; i++) {
for (j = fields[i]; j; j--)
seq_printf(seq, "%02X", *prv++);
if (fields[i + 1])
seq_putc(seq, '.');
}
}
}
/* This means the neighbour entry has no attached VCC objects. */
#define SEQ_NO_VCC_TOKEN ((void *) 2)
static void atmarp_info(struct seq_file *seq, struct neighbour *n,
struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
{
struct net_device *dev = n->dev;
unsigned long exp;
char buf[17];
int svc, llc, off;
svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
(sk_atm(clip_vcc->vcc)->sk_family == AF_ATMSVC));
llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || clip_vcc->encap);
if (clip_vcc == SEQ_NO_VCC_TOKEN)
exp = entry->neigh->used;
else
exp = clip_vcc->last_use;
exp = (jiffies - exp) / HZ;
seq_printf(seq, "%-6s%-4s%-4s%5ld ",
dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
while (off < 16)
buf[off++] = ' ';
buf[off] = '\0';
seq_printf(seq, "%s", buf);
if (clip_vcc == SEQ_NO_VCC_TOKEN) {
if (time_before(jiffies, entry->expires))
seq_printf(seq, "(resolving)\n");
else
seq_printf(seq, "(expired, ref %d)\n",
refcount_read(&entry->neigh->refcnt));
} else if (!svc) {
seq_printf(seq, "%d.%d.%d\n",
clip_vcc->vcc->dev->number,
clip_vcc->vcc->vpi, clip_vcc->vcc->vci);
} else {
svc_addr(seq, &clip_vcc->vcc->remote);
seq_putc(seq, '\n');
}
}
struct clip_seq_state {
/* This member must be first. */
struct neigh_seq_state ns;
/* Local to clip specific iteration. */
struct clip_vcc *vcc;
};
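/*
 * Step through the clip_vcc chain hanging off an ATMARP entry. An entry
 * without any VCCs is reported exactly once, using SEQ_NO_VCC_TOKEN.
 */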
static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
struct clip_vcc *curr)
{
if (!curr) {
curr = e->vccs;
if (!curr)
return SEQ_NO_VCC_TOKEN;
return curr;
}
if (curr == SEQ_NO_VCC_TOKEN)
return NULL;
curr = curr->next;
return curr;
}
static void *clip_seq_vcc_walk(struct clip_seq_state *state,
struct atmarp_entry *e, loff_t * pos)
{
struct clip_vcc *vcc = state->vcc;
vcc = clip_seq_next_vcc(e, vcc);
if (vcc && pos != NULL) {
while (*pos) {
vcc = clip_seq_next_vcc(e, vcc);
if (!vcc)
break;
--(*pos);
}
}
state->vcc = vcc;
return vcc;
}
static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
struct neighbour *n, loff_t * pos)
{
struct clip_seq_state *state = (struct clip_seq_state *)_state;
if (n->dev->type != ARPHRD_ATM)
return NULL;
return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
}
static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
{
struct clip_seq_state *state = seq->private;
state->ns.neigh_sub_iter = clip_seq_sub_iter;
return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
}
static int clip_seq_show(struct seq_file *seq, void *v)
{
static char atm_arp_banner[] =
"IPitf TypeEncp Idle IP address ATM address\n";
if (v == SEQ_START_TOKEN) {
seq_puts(seq, atm_arp_banner);
} else {
struct clip_seq_state *state = seq->private;
struct clip_vcc *vcc = state->vcc;
struct neighbour *n = v;
atmarp_info(seq, n, neighbour_priv(n), vcc);
}
return 0;
}
static const struct seq_operations arp_seq_ops = {
.start = clip_seq_start,
.next = neigh_seq_next,
.stop = neigh_seq_stop,
.show = clip_seq_show,
};
#endif
static void atm_clip_exit_noproc(void);
static int __init atm_clip_init(void)
{
register_atm_ioctl(&clip_ioctl_ops);
register_netdevice_notifier(&clip_dev_notifier);
register_inetaddr_notifier(&clip_inet_notifier);
timer_setup(&idle_timer, idle_timer_check, 0);
#ifdef CONFIG_PROC_FS
{
struct proc_dir_entry *p;
p = proc_create_net("arp", 0444, atm_proc_root, &arp_seq_ops,
sizeof(struct clip_seq_state));
if (!p) {
pr_err("Unable to initialize /proc/net/atm/arp\n");
atm_clip_exit_noproc();
return -ENOMEM;
}
}
#endif
return 0;
}
static void atm_clip_exit_noproc(void)
{
struct net_device *dev, *next;
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
deregister_atm_ioctl(&clip_ioctl_ops);
/* First, stop the idle timer, so it stops banging
* on the table.
*/
del_timer_sync(&idle_timer);
dev = clip_devs;
while (dev) {
next = PRIV(dev)->next;
unregister_netdev(dev);
free_netdev(dev);
dev = next;
}
}
static void __exit atm_clip_exit(void)
{
remove_proc_entry("arp", atm_proc_root);
atm_clip_exit_noproc();
}
module_init(atm_clip_init);
module_exit(atm_clip_exit);
MODULE_AUTHOR("Werner Almesberger");
MODULE_DESCRIPTION("Classical/IP over ATM interface");
MODULE_LICENSE("GPL");
| linux-master | net/atm/clip.c |
// SPDX-License-Identifier: GPL-2.0-only
/* net/atm/common.c - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/socket.h> /* SOL_SOCKET */
#include <linux/errno.h> /* error codes */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/time64.h> /* 64-bit time for seconds */
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
#include "protocols.h" /* atm_init_<transport> */
#include "addr.h" /* address registry */
#include "signaling.h" /* for WAITING and sigd_attach */
struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
EXPORT_SYMBOL(vcc_hash);
DEFINE_RWLOCK(vcc_sklist_lock);
EXPORT_SYMBOL(vcc_sklist_lock);
static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain);
static void __vcc_insert_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk_add_node(sk, head);
}
void vcc_insert_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(vcc_insert_socket);
static void vcc_remove_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
sk_del_node_init(sk);
write_unlock_irq(&vcc_sklist_lock);
}
static bool vcc_tx_ready(struct atm_vcc *vcc, unsigned int size)
{
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
return false;
}
return true;
}
static void vcc_sock_destruct(struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc))
printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_rmem_alloc));
if (refcount_read(&sk->sk_wmem_alloc))
printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
__func__, refcount_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up(&wq->wait);
rcu_read_unlock();
}
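/*
 * The socket counts as writable when one more maximum-sized SDU still fits
 * below the send buffer limit.
 */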
static inline int vcc_writable(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
return (vcc->qos.txtp.max_sdu +
refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
}
static void vcc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (vcc_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
static void vcc_release_cb(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
if (vcc->release_cb)
vcc->release_cb(vcc);
}
static struct proto vcc_proto = {
.name = "VCC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atm_vcc),
.release_cb = vcc_release_cb,
};
int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern)
{
struct sock *sk;
struct atm_vcc *vcc;
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_state_change = vcc_def_wakeup;
sk->sk_write_space = vcc_write_space;
vcc = atm_sk(sk);
vcc->dev = NULL;
memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
refcount_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->owner = NULL;
vcc->push_oam = NULL;
vcc->release_cb = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
sk->sk_destruct = vcc_sock_destruct;
return 0;
}
static void vcc_destroy_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct sk_buff *skb;
set_bit(ATM_VF_CLOSE, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
if (vcc->dev && vcc->dev->ops->close)
vcc->dev->ops->close(vcc);
if (vcc->push)
vcc->push(vcc, NULL); /* atmarpd has no push */
module_put(vcc->owner);
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
if (vcc->dev && vcc->dev->ops->owner) {
module_put(vcc->dev->ops->owner);
atm_dev_put(vcc->dev);
}
vcc_remove_socket(sk);
}
int vcc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
lock_sock(sk);
vcc_destroy_socket(sock->sk);
release_sock(sk);
sock_put(sk);
}
return 0;
}
void vcc_release_async(struct atm_vcc *vcc, int reply)
{
struct sock *sk = sk_atm(vcc);
set_bit(ATM_VF_CLOSE, &vcc->flags);
sk->sk_shutdown |= RCV_SHUTDOWN;
sk->sk_err = -reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
sk->sk_state_change(sk);
}
EXPORT_SYMBOL(vcc_release_async);
void vcc_process_recv_queue(struct atm_vcc *vcc)
{
struct sk_buff_head queue, *rq;
struct sk_buff *skb, *tmp;
unsigned long flags;
__skb_queue_head_init(&queue);
rq = &sk_atm(vcc)->sk_receive_queue;
spin_lock_irqsave(&rq->lock, flags);
skb_queue_splice_init(rq, &queue);
spin_unlock_irqrestore(&rq->lock, flags);
skb_queue_walk_safe(&queue, skb, tmp) {
__skb_unlink(skb, &queue);
vcc->push(vcc, skb);
}
}
EXPORT_SYMBOL(vcc_process_recv_queue);
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
__func__, signal, dev, dev->number, dev->signal);
/* atm driver sending invalid signal */
WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND);
if (dev->signal == signal)
return; /* no change */
dev->signal = signal;
atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev);
}
EXPORT_SYMBOL(atm_dev_signal_change);
void atm_dev_release_vccs(struct atm_dev *dev)
{
int i;
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
struct hlist_node *tmp;
struct sock *s;
struct atm_vcc *vcc;
sk_for_each_safe(s, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
sk_del_node_init(s);
}
}
}
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(atm_dev_release_vccs);
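/*
 * Fill in default traffic parameters and check them against the maximum
 * SDU size permitted by the AAL in use.
 */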
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
int max_sdu;
if (!tp->traffic_class)
return 0;
switch (aal) {
case ATM_AAL0:
max_sdu = ATM_CELL_SIZE-1;
break;
case ATM_AAL34:
max_sdu = ATM_MAX_AAL34_PDU;
break;
default:
pr_warn("AAL problems ... (%d)\n", aal);
fallthrough;
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
if (!tp->max_sdu)
tp->max_sdu = max_sdu;
else if (tp->max_sdu > max_sdu)
return -EINVAL;
if (!tp->max_cdv)
tp->max_cdv = ATM_MAX_CDV;
return 0;
}
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
struct sock *s;
struct atm_vcc *walk;
sk_for_each(s, head) {
walk = atm_sk(s);
if (walk->dev != vcc->dev)
continue;
if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi &&
walk->vci == vci && ((walk->qos.txtp.traffic_class !=
ATM_NONE && vcc->qos.txtp.traffic_class != ATM_NONE) ||
(walk->qos.rxtp.traffic_class != ATM_NONE &&
vcc->qos.rxtp.traffic_class != ATM_NONE)))
return -EADDRINUSE;
}
/* allow VCCs with same VPI/VCI iff they don't collide on
TX/RX (but we may refuse such sharing for other reasons,
	   e.g. if the protocol requires both channels) */
return 0;
}
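/*
 * Find a usable VPI/VCI pair on vcc->dev. Fully specified values are only
 * checked for collisions; wildcard (_ANY) components are scanned starting
 * from a static cursor shared between calls, wrapping around once before
 * giving up with -EADDRINUSE.
 */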
static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
{
static short p; /* poor man's per-device cache */
static int c;
short old_p;
int old_c;
int err;
if (*vpi != ATM_VPI_ANY && *vci != ATM_VCI_ANY) {
err = check_ci(vcc, *vpi, *vci);
return err;
}
/* last scan may have left values out of bounds for current device */
if (*vpi != ATM_VPI_ANY)
p = *vpi;
else if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
if (*vci != ATM_VCI_ANY)
c = *vci;
else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
old_p = p;
old_c = c;
do {
if (!check_ci(vcc, p, c)) {
*vpi = p;
*vci = c;
return 0;
}
if (*vci == ATM_VCI_ANY) {
c++;
if (c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
}
if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
*vpi == ATM_VPI_ANY) {
p++;
if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
}
} while (old_p != p || old_c != c);
return -EADDRINUSE;
}
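/*
 * Bind the VCC to a device and a VPI/VCI, initialize the requested AAL,
 * adjust the traffic parameters and finally call the driver's ->open().
 */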
static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
int vci)
{
struct sock *sk = sk_atm(vcc);
int error;
if ((vpi != ATM_VPI_UNSPEC && vpi != ATM_VPI_ANY &&
vpi >> dev->ci_range.vpi_bits) || (vci != ATM_VCI_UNSPEC &&
vci != ATM_VCI_ANY && vci >> dev->ci_range.vci_bits))
return -EINVAL;
if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE))
return -EPERM;
error = -ENODEV;
if (!try_module_get(dev->ops->owner))
return error;
vcc->dev = dev;
write_lock_irq(&vcc_sklist_lock);
if (test_bit(ATM_DF_REMOVED, &dev->flags) ||
(error = find_ci(vcc, &vpi, &vci))) {
write_unlock_irq(&vcc_sklist_lock);
goto fail_module_put;
}
vcc->vpi = vpi;
vcc->vci = vci;
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
switch (vcc->qos.aal) {
case ATM_AAL0:
error = atm_init_aal0(vcc);
vcc->stats = &dev->stats.aal0;
break;
case ATM_AAL34:
error = atm_init_aal34(vcc);
vcc->stats = &dev->stats.aal34;
break;
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
fallthrough;
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
break;
default:
error = -EPROTOTYPE;
}
if (!error)
error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
if (!error)
error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
if (error)
goto fail;
pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.txtp.traffic_class,
vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr,
vcc->qos.txtp.max_sdu);
pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.rxtp.traffic_class,
vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr,
vcc->qos.rxtp.max_sdu);
if (dev->ops->open) {
error = dev->ops->open(vcc);
if (error)
goto fail;
}
return 0;
fail:
vcc_remove_socket(sk);
fail_module_put:
module_put(dev->ops->owner);
/* ensure we get dev module ref count correct */
vcc->dev = NULL;
return error;
}
int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
{
struct atm_dev *dev;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("(vpi %d, vci %d)\n", vpi, vci);
if (sock->state == SS_CONNECTED)
return -EISCONN;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if (!(vpi || vci))
return -EINVAL;
if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
clear_bit(ATM_VF_PARTIAL, &vcc->flags);
else
if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
return -EINVAL;
pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
"RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
vcc->qos.aal == ATM_AAL5 ? "" :
vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EBADFD;
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
return -EINVAL;
if (likely(itf != ATM_ITF_ANY)) {
dev = try_then_request_module(atm_dev_lookup(itf),
"atm-device-%d", itf);
} else {
dev = NULL;
mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
dev = list_entry(atm_devs.next,
struct atm_dev, dev_list);
atm_dev_hold(dev);
}
mutex_unlock(&atm_dev_mutex);
}
if (!dev)
return -ENODEV;
error = __vcc_connect(vcc, dev, vpi, vci);
if (error) {
atm_dev_put(dev);
return error;
}
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
set_bit(ATM_VF_PARTIAL, &vcc->flags);
if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
sock->state = SS_CONNECTED;
return 0;
}
int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_msg(skb, 0, msg, copied);
if (error)
return error;
sock_recv_cmsgs(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
int eff, error;
lock_sock(sk);
if (sock->state != SS_CONNECTED) {
error = -ENOTCONN;
goto out;
}
if (m->msg_name) {
error = -EISCONN;
goto out;
}
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
goto out;
}
if (!size) {
error = 0;
goto out;
}
if (size > vcc->qos.txtp.max_sdu) {
error = -EMSGSIZE;
goto out;
}
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!vcc_tx_ready(vcc, eff)) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
break;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb = alloc_skb(eff, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto out;
}
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atm_account_tx(vcc, skb);
skb->dev = NULL; /* for paths shared with net_device interfaces */
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
kfree_skb(skb);
error = -EFAULT;
goto out;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
}
__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
__poll_t mask;
sock_poll_wait(file, sock, wait);
mask = 0;
vcc = ATM_SD(sock);
/* exceptional events */
if (sk->sk_err)
mask = EPOLLERR;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags))
mask |= EPOLLHUP;
/* readable? */
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* writable? */
if (sock->state == SS_CONNECTING &&
test_bit(ATM_VF_WAITING, &vcc->flags))
return mask;
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
vcc_writable(sk))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
int error;
/*
* Don't let the QoS change the already connected AAL type nor the
* traffic class.
*/
if (qos->aal != vcc->qos.aal ||
qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
return -EINVAL;
error = adjust_tp(&qos->txtp, qos->aal);
if (!error)
error = adjust_tp(&qos->rxtp, qos->aal);
if (error)
return error;
if (!vcc->dev->ops->change_qos)
return -EOPNOTSUPP;
if (sk_atm(vcc)->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
return svc_change_qos(vcc, qos);
}
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
static int check_qos(const struct atm_qos *qos)
{
int error;
if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class)
return -EINVAL;
if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
qos->txtp.traffic_class && qos->rxtp.traffic_class &&
qos->txtp.traffic_class != ATM_ANYCLASS &&
qos->rxtp.traffic_class != ATM_ANYCLASS)
return -EINVAL;
error = check_tp(&qos->txtp);
if (error)
return error;
return check_tp(&qos->rxtp);
}
int vcc_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct atm_vcc *vcc;
unsigned long value;
int error;
if (__SO_LEVEL_MATCH(optname, level) && optlen != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
{
struct atm_qos qos;
if (copy_from_sockptr(&qos, optval, sizeof(qos)))
return -EFAULT;
error = check_qos(&qos);
if (error)
return error;
if (sock->state == SS_CONNECTED)
return atm_change_qos(vcc, &qos);
if (sock->state != SS_UNCONNECTED)
return -EBADFD;
vcc->qos = qos;
set_bit(ATM_VF_HASQOS, &vcc->flags);
return 0;
}
case SO_SETCLP:
if (copy_from_sockptr(&value, optval, sizeof(value)))
return -EFAULT;
if (value)
vcc->atm_options |= ATM_ATMOPT_CLP;
else
vcc->atm_options &= ~ATM_ATMOPT_CLP;
return 0;
default:
return -EINVAL;
}
}
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct atm_vcc *vcc;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EINVAL;
return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
? -EFAULT : 0;
case SO_SETCLP:
return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
(unsigned long __user *)optval) ? -EFAULT : 0;
case SO_ATMPVC:
{
struct sockaddr_atmpvc pvc;
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
pvc.sap_addr.vci = vcc->vci;
return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
}
default:
return -EINVAL;
}
}
int register_atmdevice_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(register_atmdevice_notifier);
void unregister_atmdevice_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier);
static int __init atm_init(void)
{
int error;
error = proto_register(&vcc_proto, 0);
if (error < 0)
goto out;
error = atmpvc_init();
if (error < 0) {
pr_err("atmpvc_init() failed with %d\n", error);
goto out_unregister_vcc_proto;
}
error = atmsvc_init();
if (error < 0) {
pr_err("atmsvc_init() failed with %d\n", error);
goto out_atmpvc_exit;
}
error = atm_proc_init();
if (error < 0) {
pr_err("atm_proc_init() failed with %d\n", error);
goto out_atmsvc_exit;
}
error = atm_sysfs_init();
if (error < 0) {
pr_err("atm_sysfs_init() failed with %d\n", error);
goto out_atmproc_exit;
}
out:
return error;
out_atmproc_exit:
atm_proc_exit();
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
	atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
}
static void __exit atm_exit(void)
{
atm_proc_exit();
atm_sysfs_exit();
atmsvc_exit();
atmpvc_exit();
proto_unregister(&vcc_proto);
}
subsys_initcall(atm_init);
module_exit(atm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ATMPVC);
MODULE_ALIAS_NETPROTO(PF_ATMSVC);
| linux-master | net/atm/common.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/atmmpc.h>
#include <linux/slab.h>
#include <linux/time.h>
#include "mpoa_caches.h"
#include "mpc.h"
/*
* mpoa_caches.c: Implementation of ingress and egress cache
* handling functions
*/
#if 0
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
static in_cache_entry *in_cache_get(__be32 dst_ip,
struct mpoa_client *client)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->ctrl_info.in_dst_ip == dst_ip) {
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
struct mpoa_client *client,
__be32 mask)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
struct mpoa_client *client)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
struct mpoa_client *client)
{
in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
if (entry == NULL) {
pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
return NULL;
}
dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
refcount_set(&entry->use, 1);
dprintk("new_in_cache_entry: about to lock\n");
write_lock_bh(&client->ingress_lock);
entry->next = client->in_cache;
entry->prev = NULL;
if (client->in_cache != NULL)
client->in_cache->prev = entry;
client->in_cache = entry;
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info.in_dst_ip = dst_ip;
entry->time = ktime_get_seconds();
entry->retry_time = client->parameters.mpc_p4;
entry->count = 1;
entry->entry_state = INGRESS_INVALID;
entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT;
refcount_inc(&entry->use);
write_unlock_bh(&client->ingress_lock);
dprintk("new_in_cache_entry: unlocked\n");
return entry;
}
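/*
 * Account a hit on an ingress entry. Once the short-term hit count exceeds
 * MPC-p1, the MPOA daemon is asked to (re)resolve the destination. Returns
 * OPEN when a shortcut VCC may be used, CLOSED otherwise.
 */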
static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
{
struct atm_mpoa_qos *qos;
struct k_message msg;
entry->count++;
if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
return OPEN;
if (entry->entry_state == INGRESS_REFRESHING) {
if (entry->count > mpc->parameters.mpc_p1) {
msg.type = SND_MPOA_RES_RQST;
msg.content.in_info = entry->ctrl_info;
memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
entry->reply_wait = ktime_get_seconds();
entry->entry_state = INGRESS_RESOLVING;
}
if (entry->shortcut != NULL)
return OPEN;
return CLOSED;
}
if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
return OPEN;
if (entry->count > mpc->parameters.mpc_p1 &&
entry->entry_state == INGRESS_INVALID) {
dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
mpc->dev->name, &entry->ctrl_info.in_dst_ip);
entry->entry_state = INGRESS_RESOLVING;
msg.type = SND_MPOA_RES_RQST;
memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
entry->reply_wait = ktime_get_seconds();
}
return CLOSED;
}
static void in_cache_put(in_cache_entry *entry)
{
if (refcount_dec_and_test(&entry->use)) {
kfree_sensitive(entry);
}
}
/*
* This should be called with write lock on
*/
static void in_cache_remove_entry(in_cache_entry *entry,
struct mpoa_client *client)
{
struct atm_vcc *vcc;
struct k_message msg;
vcc = entry->shortcut;
dprintk("removing an ingress entry, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
if (entry->prev != NULL)
entry->prev->next = entry->next;
else
client->in_cache = entry->next;
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->in_ops->put(entry);
if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
msg_to_mpoad(&msg, client);
}
/* Check if the egress side still uses this VCC */
if (vcc != NULL) {
eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
client);
if (eg_entry != NULL) {
client->eg_ops->put(eg_entry);
return;
}
vcc_release_async(vcc, -EPIPE);
}
}
/* Call this every MPC-p2 seconds... Not exactly correct solution,
but an easy one... */
static void clear_count_and_expired(struct mpoa_client *client)
{
in_cache_entry *entry, *next_entry;
time64_t now;
now = ktime_get_seconds();
write_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
entry->count = 0;
next_entry = entry->next;
if ((now - entry->time) > entry->ctrl_info.holding_time) {
dprintk("holding time expired, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
client->in_ops->remove_entry(entry, client);
}
entry = next_entry;
}
write_unlock_bh(&client->ingress_lock);
}
/* Call this every MPC-p4 seconds. */
static void check_resolving_entries(struct mpoa_client *client)
{
struct atm_mpoa_qos *qos;
in_cache_entry *entry;
time64_t now;
struct k_message msg;
now = ktime_get_seconds();
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVING) {
if ((now - entry->hold_down)
< client->parameters.mpc_p6) {
entry = entry->next; /* Entry in hold down */
continue;
}
if ((now - entry->reply_wait) > entry->retry_time) {
entry->retry_time = MPC_C1 * (entry->retry_time);
/*
* Retry time maximum exceeded,
* put entry in hold down.
*/
if (entry->retry_time > client->parameters.mpc_p5) {
entry->hold_down = ktime_get_seconds();
entry->retry_time = client->parameters.mpc_p4;
entry = entry->next;
continue;
}
/* Ask daemon to send a resolution request. */
memset(&entry->hold_down, 0, sizeof(time64_t));
msg.type = SND_MPOA_RES_RTRY;
memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, client);
entry->reply_wait = ktime_get_seconds();
}
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
}
/* Call this every MPC-p5 seconds. */
static void refresh_entries(struct mpoa_client *client)
{
time64_t now;
struct in_cache_entry *entry = client->in_cache;
ddprintk("refresh_entries\n");
now = ktime_get_seconds();
read_lock_bh(&client->ingress_lock);
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVED) {
if (!(entry->refresh_time))
entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
if ((now - entry->reply_wait) >
entry->refresh_time) {
dprintk("refreshing an entry.\n");
entry->entry_state = INGRESS_REFRESHING;
}
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
}
static void in_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->ingress_lock);
while (mpc->in_cache != NULL)
mpc->in_ops->remove_entry(mpc->in_cache, mpc);
write_unlock_irq(&mpc->ingress_lock);
}
static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.cache_id == cache_id) {
refcount_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
return NULL;
}
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
{
unsigned long flags;
eg_cache_entry *entry;
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.tag == tag) {
refcount_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
entry = entry->next;
}
read_unlock_irqrestore(&mpc->egress_lock, flags);
return NULL;
}
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
struct mpoa_client *mpc)
{
unsigned long flags;
eg_cache_entry *entry;
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
refcount_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
entry = entry->next;
}
read_unlock_irqrestore(&mpc->egress_lock, flags);
return NULL;
}
static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->latest_ip_addr == ipaddr) {
refcount_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
return NULL;
}
static void eg_cache_put(eg_cache_entry *entry)
{
if (refcount_dec_and_test(&entry->use)) {
kfree_sensitive(entry);
}
}
/*
* This should be called with write lock on
*/
static void eg_cache_remove_entry(eg_cache_entry *entry,
struct mpoa_client *client)
{
struct atm_vcc *vcc;
struct k_message msg;
vcc = entry->shortcut;
dprintk("removing an egress entry.\n");
if (entry->prev != NULL)
entry->prev->next = entry->next;
else
client->eg_cache = entry->next;
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->eg_ops->put(entry);
if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
msg_to_mpoad(&msg, client);
}
/* Check if the ingress side still uses this VCC */
if (vcc != NULL) {
in_cache_entry *in_entry = client->in_ops->get_by_vcc(vcc, client);
if (in_entry != NULL) {
client->in_ops->put(in_entry);
return;
}
vcc_release_async(vcc, -EPIPE);
}
}
static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
struct mpoa_client *client)
{
eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
if (entry == NULL) {
pr_info("out of memory\n");
return NULL;
}
dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
&msg->content.eg_info.eg_dst_ip);
refcount_set(&entry->use, 1);
dprintk("new_eg_cache_entry: about to lock\n");
write_lock_irq(&client->egress_lock);
entry->next = client->eg_cache;
entry->prev = NULL;
if (client->eg_cache != NULL)
client->eg_cache->prev = entry;
client->eg_cache = entry;
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info = msg->content.eg_info;
entry->time = ktime_get_seconds();
entry->entry_state = EGRESS_RESOLVED;
dprintk("new_eg_cache_entry cache_id %u\n",
ntohl(entry->ctrl_info.cache_id));
dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
refcount_inc(&entry->use);
write_unlock_irq(&client->egress_lock);
dprintk("new_eg_cache_entry: unlocked\n");
return entry;
}
static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
{
entry->time = ktime_get_seconds();
entry->entry_state = EGRESS_RESOLVED;
entry->ctrl_info.holding_time = holding_time;
}
static void clear_expired(struct mpoa_client *client)
{
eg_cache_entry *entry, *next_entry;
time64_t now;
struct k_message msg;
now = ktime_get_seconds();
write_lock_irq(&client->egress_lock);
entry = client->eg_cache;
while (entry != NULL) {
next_entry = entry->next;
if ((now - entry->time) > entry->ctrl_info.holding_time) {
msg.type = SND_EGRESS_PURGE;
msg.content.eg_info = entry->ctrl_info;
dprintk("egress_cache: holding time expired, cache_id = %u.\n",
ntohl(entry->ctrl_info.cache_id));
msg_to_mpoad(&msg, client);
client->eg_ops->remove_entry(entry, client);
}
entry = next_entry;
}
write_unlock_irq(&client->egress_lock);
}
static void eg_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->egress_lock);
while (mpc->eg_cache != NULL)
mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
write_unlock_irq(&mpc->egress_lock);
}
static const struct in_cache_ops ingress_ops = {
.add_entry = in_cache_add_entry,
.get = in_cache_get,
.get_with_mask = in_cache_get_with_mask,
.get_by_vcc = in_cache_get_by_vcc,
.put = in_cache_put,
.remove_entry = in_cache_remove_entry,
.cache_hit = cache_hit,
.clear_count = clear_count_and_expired,
.check_resolving = check_resolving_entries,
.refresh = refresh_entries,
.destroy_cache = in_destroy_cache
};
static const struct eg_cache_ops egress_ops = {
.add_entry = eg_cache_add_entry,
.get_by_cache_id = eg_cache_get_by_cache_id,
.get_by_tag = eg_cache_get_by_tag,
.get_by_vcc = eg_cache_get_by_vcc,
.get_by_src_ip = eg_cache_get_by_src_ip,
.put = eg_cache_put,
.remove_entry = eg_cache_remove_entry,
.update = update_eg_cache_entry,
.clear_expired = clear_expired,
.destroy_cache = eg_destroy_cache
};
void atm_mpoa_init_cache(struct mpoa_client *mpc)
{
mpc->in_ops = &ingress_ops;
mpc->eg_ops = &egress_ops;
}
| linux-master | net/atm/mpoa_caches.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/raw.c - Raw AAL0 and AAL5 transports */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "common.h"
#include "protocols.h"
/*
* SKB == NULL indicates that the link is being closed
*/
static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
if (skb) {
struct sock *sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
}
}
static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct sock *sk = sk_atm(vcc);
pr_debug("(%d) %d -= %d\n",
vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
{
/*
* Note that if vpi/vci are _ANY or _UNSPEC the below will
* still work
*/
if (!capable(CAP_NET_ADMIN) &&
(((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
((vcc->vpi << ATM_HDR_VPI_SHIFT) |
(vcc->vci << ATM_HDR_VCI_SHIFT))) {
kfree_skb(skb);
return -EADDRNOTAVAIL;
}
if (vcc->dev->ops->send_bh)
return vcc->dev->ops->send_bh(vcc, skb);
return vcc->dev->ops->send(vcc, skb);
}
int atm_init_aal0(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
vcc->send = atm_send_aal0;
return 0;
}
int atm_init_aal34(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
if (vcc->dev->ops->send_bh)
vcc->send = vcc->dev->ops->send_bh;
else
vcc->send = vcc->dev->ops->send;
return 0;
}
int atm_init_aal5(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
if (vcc->dev->ops->send_bh)
vcc->send = vcc->dev->ops->send_bh;
else
vcc->send = vcc->dev->ops->send;
return 0;
}
EXPORT_SYMBOL(atm_init_aal5);
| linux-master | net/atm/raw.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/addr.c - Local ATM address registry */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "signaling.h"
#include "addr.h"
static int check_addr(const struct sockaddr_atmsvc *addr)
{
int i;
if (addr->sas_family != AF_ATMSVC)
return -EAFNOSUPPORT;
if (!*addr->sas_addr.pub)
return *addr->sas_addr.prv ? 0 : -EINVAL;
for (i = 1; i < ATM_E164_LEN + 1; i++) /* make sure it's \0-terminated */
if (!addr->sas_addr.pub[i])
return 0;
return -EINVAL;
}
static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b)
{
if (*a->sas_addr.prv)
if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN))
return 0;
if (!*a->sas_addr.pub)
return !*b->sas_addr.pub;
if (!*b->sas_addr.pub)
return 0;
return !strcmp(a->sas_addr.pub, b->sas_addr.pub);
}
static void notify_sigd(const struct atm_dev *dev)
{
struct sockaddr_atmpvc pvc;
pvc.sap_addr.itf = dev->number;
sigd_enq(NULL, as_itf_notify, NULL, &pvc, NULL);
}
void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this, *p;
struct list_head *head;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry_safe(this, p, head, entry) {
list_del(&this->entry);
kfree(this);
}
spin_unlock_irqrestore(&dev->lock, flags);
if (head == &dev->local)
notify_sigd(dev);
}
int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int error;
error = check_addr(addr);
if (error)
return error;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry) {
if (identical(&this->addr, addr)) {
spin_unlock_irqrestore(&dev->lock, flags);
return -EEXIST;
}
}
this = kmalloc(sizeof(struct atm_dev_addr), GFP_ATOMIC);
if (!this) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOMEM;
}
this->addr = *addr;
list_add(&this->entry, head);
spin_unlock_irqrestore(&dev->lock, flags);
if (head == &dev->local)
notify_sigd(dev);
return 0;
}
int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int error;
error = check_addr(addr);
if (error)
return error;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry) {
if (identical(&this->addr, addr)) {
list_del(&this->entry);
spin_unlock_irqrestore(&dev->lock, flags);
kfree(this);
if (head == &dev->local)
notify_sigd(dev);
return 0;
}
}
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOENT;
}
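/*
 * Copy the device's local (or LECS) address list to user space. Returns
 * the size of the full list in bytes, -E2BIG if the supplied buffer was
 * too small, or -EFAULT if the copy failed.
 */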
int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user * buf,
size_t size, enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int total = 0, error;
struct sockaddr_atmsvc *tmp_buf, *tmp_bufp;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry)
total += sizeof(struct sockaddr_atmsvc);
tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC);
if (!tmp_buf) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOMEM;
}
list_for_each_entry(this, head, entry)
memcpy(tmp_bufp++, &this->addr, sizeof(struct sockaddr_atmsvc));
spin_unlock_irqrestore(&dev->lock, flags);
error = total > size ? -E2BIG : total;
if (copy_to_user(buf, tmp_buf, total < size ? total : size))
error = -EFAULT;
kfree(tmp_buf);
return error;
}
| linux-master | net/atm/addr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */
/* Copyright 1999-2000 by Mitchell Blank Jr */
/* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/* And on ppp_async.c; Copyright 1999 Paul Mackerras */
/* And help from Jens Axboe */
/*
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames in ATM AAL5 PDUs.
*/
/*
* One shortcoming of this driver is that it does not comply with
* section 8 of RFC2364 - we are supposed to detect a change
* in encapsulation and immediately abort the connection (in order
* to avoid a black-hole being created if our peer loses state
* and changes encapsulation unilaterally. However, since the
* ppp_generic layer actually does the decapsulation, we need
* a way of notifying it when we _think_ there might be a problem)
 *	There are two cases:
* 1. LLC-encapsulation was missing when it was enabled. In
* this case, we should tell the upper layer "tear down
* this session if this skb looks ok to you"
* 2. LLC-encapsulation was present when it was disabled. Then
* we need to tell the upper layer "this packet may be
 *		ok, but if it's in error tear down the session"
* These hooks are not yet available in ppp_generic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/atmppp.h>
#include "common.h"
enum pppoatm_encaps {
e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
e_vc = PPPOATM_ENCAPS_VC,
e_llc = PPPOATM_ENCAPS_LLC,
};
struct pppoatm_vcc {
struct atm_vcc *atmvcc; /* VCC descriptor */
void (*old_push)(struct atm_vcc *, struct sk_buff *);
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
void (*old_release_cb)(struct atm_vcc *);
struct module *old_owner;
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
atomic_t inflight;
unsigned long blocked;
int flags; /* SC_COMP_PROT - compress protocol */
struct ppp_channel chan; /* interface to generic ppp layer */
struct tasklet_struct wakeup_tasklet;
};
/*
* We want to allow two packets in the queue. The one that's currently in
* flight, and *one* queued up ready for the ATM device to send immediately
* from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
* inflight == -2 represents an empty queue, -1 one packet, and zero means
* there are two packets in the queue.
*/
#define NONE_INFLIGHT -2
#define BLOCKED 0
/*
* Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
* ID (0xC021) used in autodetection
*/
static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
#define LLC_LEN (4)
static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
{
return (struct pppoatm_vcc *) (atmvcc->user_back);
}
static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
{
return (struct pppoatm_vcc *) (chan->private);
}
/*
* We can't do this directly from our _pop handler, since the ppp code
* doesn't want to be called in interrupt context, so we do it from
* a tasklet
*/
static void pppoatm_wakeup_sender(struct tasklet_struct *t)
{
struct pppoatm_vcc *pvcc = from_tasklet(pvcc, t, wakeup_tasklet);
ppp_output_wakeup(&pvcc->chan);
}
static void pppoatm_release_cb(struct atm_vcc *atmvcc)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
/*
* As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
* the wakeup *can't* race with pppoatm_send(). They both hold the PPP
* channel's ->downl lock. And the potential race with *setting* it,
* which leads to the double-check dance in pppoatm_may_send(), doesn't
* exist here. In the sock_owned_by_user() case in pppoatm_send(), we
* set the BLOCKED bit while the socket is still locked. We know that
* ->release_cb() can't be called until that's done.
*/
if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
tasklet_schedule(&pvcc->wakeup_tasklet);
if (pvcc->old_release_cb)
pvcc->old_release_cb(atmvcc);
}
/*
* This gets called every time the ATM card has finished sending our
 * skb. The ->old_pop will take care of normal ATM flow control,
* but we also need to wake up the device if we blocked it
*/
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pvcc->old_pop(atmvcc, skb);
atomic_dec(&pvcc->inflight);
/*
* We always used to run the wakeup tasklet unconditionally here, for
* fear of race conditions where we clear the BLOCKED flag just as we
* refuse another packet in pppoatm_send(). This was quite inefficient.
*
* In fact it's OK. The PPP core will only ever call pppoatm_send()
* while holding the channel->downl lock. And ppp_output_wakeup() as
* called by the tasklet will *also* grab that lock. So even if another
* CPU is in pppoatm_send() right now, the tasklet isn't going to race
* with it. The wakeup *will* happen after the other CPU is safely out
* of pppoatm_send() again.
*
* So if the CPU in pppoatm_send() has already set the BLOCKED bit and
	 * is about to return, that's fine. We trigger a wakeup which will
* happen later. And if the CPU in pppoatm_send() *hasn't* set the
* BLOCKED bit yet, that's fine too because of the double check in
* pppoatm_may_send() which is commented there.
*/
if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
tasklet_schedule(&pvcc->wakeup_tasklet);
}
/*
* Unbind from PPP - currently we only do this when closing the socket,
* but we could put this into an ioctl if need be
*/
static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
{
struct pppoatm_vcc *pvcc;
pvcc = atmvcc_to_pvcc(atmvcc);
atmvcc->push = pvcc->old_push;
atmvcc->pop = pvcc->old_pop;
atmvcc->release_cb = pvcc->old_release_cb;
tasklet_kill(&pvcc->wakeup_tasklet);
ppp_unregister_channel(&pvcc->chan);
atmvcc->user_back = NULL;
kfree(pvcc);
}
/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
struct module *module;
pr_debug("removing ATMPPP VCC %p\n", pvcc);
module = pvcc->old_owner;
pppoatm_unassign_vcc(atmvcc);
atmvcc->push(atmvcc, NULL); /* Pass along bad news */
module_put(module);
return;
}
atm_return(atmvcc, skb->truesize);
switch (pvcc->encaps) {
case e_llc:
if (skb->len < LLC_LEN ||
memcmp(skb->data, pppllc, LLC_LEN))
goto error;
skb_pull(skb, LLC_LEN);
break;
case e_autodetect:
if (pvcc->chan.ppp == NULL) { /* Not bound yet! */
kfree_skb(skb);
return;
}
if (skb->len >= sizeof(pppllc) &&
!memcmp(skb->data, pppllc, sizeof(pppllc))) {
pvcc->encaps = e_llc;
skb_pull(skb, LLC_LEN);
break;
}
if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
!memcmp(skb->data, &pppllc[LLC_LEN],
sizeof(pppllc) - LLC_LEN)) {
pvcc->encaps = e_vc;
pvcc->chan.mtu += LLC_LEN;
break;
}
pr_debug("Couldn't autodetect yet (skb: %6ph)\n", skb->data);
goto error;
case e_vc:
break;
}
ppp_input(&pvcc->chan, skb);
return;
error:
kfree_skb(skb);
ppp_input_error(&pvcc->chan, 0);
}
static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
/*
* It's not clear that we need to bother with using atm_may_send()
* to check we don't exceed sk->sk_sndbuf. If userspace sets a
* value of sk_sndbuf which is lower than the MTU, we're going to
* block for ever. But the code always did that before we introduced
* the packet count limit, so...
*/
if (atm_may_send(pvcc->atmvcc, size) &&
atomic_inc_not_zero(&pvcc->inflight))
return 1;
/*
* We use test_and_set_bit() rather than set_bit() here because
* we need to ensure there's a memory barrier after it. The bit
* *must* be set before we do the atomic_inc() on pvcc->inflight.
* There's no smp_mb__after_set_bit(), so it's this or abuse
* smp_mb__after_atomic().
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);
/*
* We may have raced with pppoatm_pop(). If it ran for the
* last packet in the queue, *just* before we set the BLOCKED
* bit, then it might never run again and the channel could
* remain permanently blocked. Cope with that race by checking
* *again*. If it did run in that window, we'll have space on
* the queue now and can return success. It's harmless to leave
* the BLOCKED flag set, since it's only used as a trigger to
* run the wakeup tasklet. Another wakeup will never hurt.
* If pppoatm_pop() is running but hasn't got as far as making
* space on the queue yet, then it hasn't checked the BLOCKED
* flag yet either, so we're safe in that case too. It'll issue
* an "immediate" wakeup... where "immediate" actually involves
* taking the PPP channel's ->downl lock, which is held by the
* code path that calls pppoatm_send(), and is thus going to
* wait for us to finish.
*/
if (atm_may_send(pvcc->atmvcc, size) &&
atomic_inc_not_zero(&pvcc->inflight))
return 1;
return 0;
}
/*
* Called by ppp_generic.c to send a packet - returns true if the packet
* was accepted. If we return false, then it's our job to call
* ppp_output_wakeup(chan) when we're feeling more up to it.
* Note that in the ENOMEM case (as opposed to the !atm_may_send case)
* we should really drop the packet, but the generic layer doesn't
* support this yet. We just return 'DROP_PACKET' which we actually define
* as success, just to be clear what we're really doing.
*/
#define DROP_PACKET 1
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
struct atm_vcc *vcc;
int ret;
ATM_SKB(skb)->vcc = pvcc->atmvcc;
pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
(void) skb_pull(skb, 1);
vcc = ATM_SKB(skb)->vcc;
bh_lock_sock(sk_atm(vcc));
if (sock_owned_by_user(sk_atm(vcc))) {
/*
* Needs to happen (and be flushed, hence test_and_) before we unlock
* the socket. It needs to be seen by the time our ->release_cb gets
* called.
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);
goto nospace;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
bh_unlock_sock(sk_atm(vcc));
kfree_skb(skb);
return DROP_PACKET;
}
switch (pvcc->encaps) { /* LLC encapsulation needed */
case e_llc:
if (skb_headroom(skb) < LLC_LEN) {
struct sk_buff *n;
n = skb_realloc_headroom(skb, LLC_LEN);
if (n != NULL &&
!pppoatm_may_send(pvcc, n->truesize)) {
kfree_skb(n);
goto nospace;
}
consume_skb(skb);
skb = n;
if (skb == NULL) {
bh_unlock_sock(sk_atm(vcc));
return DROP_PACKET;
}
} else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
break;
case e_vc:
if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
break;
case e_autodetect:
bh_unlock_sock(sk_atm(vcc));
pr_debug("Trying to send without setting encaps!\n");
kfree_skb(skb);
return 1;
}
atm_account_tx(vcc, skb);
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
? DROP_PACKET : 1;
bh_unlock_sock(sk_atm(vcc));
return ret;
nospace:
bh_unlock_sock(sk_atm(vcc));
/*
* We don't have space to send this SKB now, but we might have
* already applied SC_COMP_PROT compression, so we may need to undo it.
*/
if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
skb->data[-1] == '\0')
(void) skb_push(skb, 1);
return 0;
}
/* This handles ioctls sent to the /dev/ppp interface */
static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case PPPIOCGFLAGS:
return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
case PPPIOCSFLAGS:
return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
}
return -ENOTTY;
}
static const struct ppp_channel_ops pppoatm_ops = {
.start_xmit = pppoatm_send,
.ioctl = pppoatm_devppp_ioctl,
};
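/*
* Bind the PPPoATM backend to this VCC: validate the requested
* encapsulation, allocate the per-VCC state, hook our push/pop/release_cb
* handlers and register a PPP channel with ppp_generic.
*/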
static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
{
struct atm_backend_ppp be;
struct pppoatm_vcc *pvcc;
int err;
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
return -EINVAL;
pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
if (pvcc == NULL)
return -ENOMEM;
pvcc->atmvcc = atmvcc;
/* Maximum is zero, so that we can use atomic_inc_not_zero() */
atomic_set(&pvcc->inflight, NONE_INFLIGHT);
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
pvcc->old_owner = atmvcc->owner;
pvcc->old_release_cb = atmvcc->release_cb;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
pvcc->chan.private = pvcc;
pvcc->chan.ops = &pppoatm_ops;
pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
(be.encaps == e_vc ? 0 : LLC_LEN);
tasklet_setup(&pvcc->wakeup_tasklet, pppoatm_wakeup_sender);
err = ppp_register_channel(&pvcc->chan);
if (err != 0) {
kfree(pvcc);
return err;
}
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
atmvcc->release_cb = pppoatm_release_cb;
__module_get(THIS_MODULE);
atmvcc->owner = THIS_MODULE;
/* re-process everything received between connection setup and
backend setup */
vcc_process_recv_queue(atmvcc);
return 0;
}
/*
* This handles ioctls actually performed on our vcc - we must return
* -ENOIOCTLCMD for any unrecognized ioctl
*/
static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_vcc *atmvcc = ATM_SD(sock);
void __user *argp = (void __user *)arg;
if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
return -ENOIOCTLCMD;
switch (cmd) {
case ATM_SETBACKEND: {
atm_backend_t b;
if (get_user(b, (atm_backend_t __user *) argp))
return -EFAULT;
if (b != ATM_BACKEND_PPP)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sock->state != SS_CONNECTED)
return -EINVAL;
return pppoatm_assign_vcc(atmvcc, argp);
}
case PPPIOCGCHAN:
return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
case PPPIOCGUNIT:
return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
}
return -ENOIOCTLCMD;
}
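/*
* For reference, userspace attaches this backend roughly as follows.
* This is a sketch only: atm_fd and chan_idx are illustrative names,
* error handling is omitted, and the field names of struct atm_backend_ppp
* should be checked against <linux/atmppp.h> (only .encaps is used above).
*
*	struct atm_backend_ppp be = {
*		.backend_num = ATM_BACKEND_PPP,
*		.encaps = PPPOATM_ENCAPS_LLC,
*	};
*	ioctl(atm_fd, ATM_SETBACKEND, &be);	// on a connected ATM socket
*	ioctl(atm_fd, PPPIOCGCHAN, &chan_idx);	// hand chan_idx to pppd
*/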
static struct atm_ioctl pppoatm_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = pppoatm_ioctl,
};
static int __init pppoatm_init(void)
{
register_atm_ioctl(&pppoatm_ioctl_ops);
return 0;
}
static void __exit pppoatm_exit(void)
{
deregister_atm_ioctl(&pppoatm_ioctl_ops);
}
module_init(pppoatm_init);
module_exit(pppoatm_exit);
MODULE_AUTHOR("Mitchell Blank Jr <[email protected]>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");
| linux-master | net/atm/pppoatm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lec.c: Lan Emulation driver
*
* Marko Kiiskila <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/capability.h>
/* We are ethernet device */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include <net/dst.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>
/* Proxy LEC knows about bridging */
#if IS_ENABLED(CONFIG_BRIDGE)
#include "../bridge/br_private.h"
static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
#endif
/* Modular too */
#include <linux/module.h>
#include <linux/init.h>
/* Hardening for Spectre-v1 */
#include <linux/nospec.h>
#include "lec.h"
#include "lec_arpc.h"
#include "resources.h"
#define DUMP_PACKETS 0 /*
* 0 = None,
* 1 = first 30 bytes
* 2 = Whole packet
*/
#define LEC_UNRES_QUE_LEN 8 /*
* number of tx packets to queue for a
* single destination while waiting for SVC
*/
static int lec_open(struct net_device *dev);
static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int lec_close(struct net_device *dev);
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
const unsigned char *mac_addr);
static int lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove);
/* LANE2 functions */
static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address,
const u8 *tlvs, u32 sizeoftlvs);
static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
u8 **tlvs, u32 *sizeoftlvs);
static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
const u8 *tlvs, u32 sizeoftlvs);
static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
unsigned long permanent);
static void lec_arp_check_empties(struct lec_priv *priv,
struct atm_vcc *vcc, struct sk_buff *skb);
static void lec_arp_destroy(struct lec_priv *priv);
static void lec_arp_init(struct lec_priv *priv);
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
const unsigned char *mac_to_find,
int is_rdesc,
struct lec_arp_table **ret_entry);
static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
const unsigned char *atm_addr,
unsigned long remoteflag,
unsigned int targetless_le_arp);
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
static void lec_set_flush_tran_id(struct lec_priv *priv,
const unsigned char *atm_addr,
unsigned long tran_id);
static void lec_vcc_added(struct lec_priv *priv,
const struct atmlec_ioc *ioc_data,
struct atm_vcc *vcc,
void (*old_push)(struct atm_vcc *vcc,
struct sk_buff *skb));
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
/* must be done under lec_arp_lock */
static inline void lec_arp_hold(struct lec_arp_table *entry)
{
refcount_inc(&entry->usage);
}
static inline void lec_arp_put(struct lec_arp_table *entry)
{
if (refcount_dec_and_test(&entry->usage))
kfree(entry);
}
static struct lane2_ops lane2_ops = {
.resolve = lane2_resolve, /* spec 3.1.3 */
.associate_req = lane2_associate_req, /* spec 3.1.4 */
.associate_indicator = NULL /* spec 3.1.5 */
};
static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* Device structures */
static struct net_device *dev_lec[MAX_LEC_ITF];
#if IS_ENABLED(CONFIG_BRIDGE)
static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
{
char *buff;
struct lec_priv *priv;
/*
* Check if this is a BPDU. If so, ask zeppelin to send
* LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
* as the Config BPDU has
*/
buff = skb->data + skb->dev->hard_header_len;
if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
struct sock *sk;
struct sk_buff *skb2;
struct atmlec_msg *mesg;
skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
if (skb2 == NULL)
return;
skb2->len = sizeof(struct atmlec_msg);
mesg = (struct atmlec_msg *)skb2->data;
mesg->type = l_topology_change;
buff += 4;
mesg->content.normal.flag = *buff & 0x01;
/* 0x01 is topology change */
priv = netdev_priv(dev);
atm_force_charge(priv->lecd, skb2->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb2);
sk->sk_data_ready(sk);
}
}
#endif /* IS_ENABLED(CONFIG_BRIDGE) */
/*
* Open/initialize the netdevice. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
* there is a non-reboot way to recover if something goes wrong.
*/
static int lec_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
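/* Transmit one skb on the given VCC, updating the netdevice stats. */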
static void
lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
ATM_SKB(skb)->vcc = vcc;
atm_account_tx(vcc, skb);
if (vcc->send(vcc, skb) < 0) {
dev->stats.tx_dropped++;
return;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
pr_info("%s\n", dev->name);
netif_trans_update(dev);
netif_wake_queue(dev);
}
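/*
* Main transmit path: prepend the 2-byte LE header, pad the frame to the
* minimum 802.3 size, resolve the destination MAC to a data-direct VCC
* (queueing or flooding via the BUS while resolution is pending) and send.
*/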
static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *skb2;
struct lec_priv *priv = netdev_priv(dev);
struct lecdatahdr_8023 *lec_h;
struct atm_vcc *vcc;
struct lec_arp_table *entry;
unsigned char *dst;
int min_frame_size;
int is_rdesc;
pr_debug("called\n");
if (!priv->lecd) {
pr_info("%s:No lecd attached\n", dev->name);
dev->stats.tx_errors++;
netif_stop_queue(dev);
kfree_skb(skb);
return NETDEV_TX_OK;
}
pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
(long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
(long)skb_end_pointer(skb));
#if IS_ENABLED(CONFIG_BRIDGE)
if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
lec_handle_bridge(skb, dev);
#endif
/* Make sure we have room for lec_id */
if (skb_headroom(skb) < 2) {
pr_debug("reallocating skb\n");
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
if (unlikely(!skb2)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
consume_skb(skb);
skb = skb2;
}
skb_push(skb, 2);
/* Put le header to place */
lec_h = (struct lecdatahdr_8023 *)skb->data;
lec_h->le_header = htons(priv->lecid);
#if DUMP_PACKETS >= 2
#define MAX_DUMP_SKB 99
#elif DUMP_PACKETS >= 1
#define MAX_DUMP_SKB 30
#endif
#if DUMP_PACKETS >= 1
printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
dev->name, skb->len, priv->lecid);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, min(skb->len, MAX_DUMP_SKB), true);
#endif /* DUMP_PACKETS >= 1 */
/* Minimum ethernet-frame size */
min_frame_size = LEC_MINIMUM_8023_SIZE;
if (skb->len < min_frame_size) {
if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
skb2 = skb_copy_expand(skb, 0,
min_frame_size - skb->truesize,
GFP_ATOMIC);
dev_kfree_skb(skb);
if (skb2 == NULL) {
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
skb = skb2;
}
skb_put(skb, min_frame_size - skb->len);
}
/* Send to right vcc */
is_rdesc = 0;
dst = lec_h->h_dest;
entry = NULL;
vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
dev->name, vcc, vcc ? vcc->flags : 0, entry);
if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
pr_debug("%s:queuing packet, MAC address %pM\n",
dev->name, lec_h->h_dest);
skb_queue_tail(&entry->tx_wait, skb);
} else {
pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
dev->name, lec_h->h_dest);
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
}
goto out;
}
#if DUMP_PACKETS > 0
printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
dev->name, vcc->vpi, vcc->vci);
#endif /* DUMP_PACKETS > 0 */
while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
lec_send(vcc, skb2);
}
lec_send(vcc, skb);
if (!atm_may_send(vcc, 0)) {
struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
vpriv->xoff = 1;
netif_stop_queue(dev);
/*
* vcc->pop() might have occurred in between, making
* the vcc usable again. Since xmit is serialized,
* this is the only situation we have to re-test.
*/
if (atm_may_send(vcc, 0))
netif_wake_queue(dev);
}
out:
if (entry)
lec_arp_put(entry);
netif_trans_update(dev);
return NETDEV_TX_OK;
}
/* The inverse routine to lec_open(). */
static int lec_close(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
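/*
* Control messages from the LANE daemon (zeppelin) arrive here through the
* pseudo ATM device's ->send() hook and update the client's state.
*/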
static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
static const u8 zero_addr[ETH_ALEN] = {};
unsigned long flags;
struct net_device *dev = (struct net_device *)vcc->proto_data;
struct lec_priv *priv = netdev_priv(dev);
struct atmlec_msg *mesg;
struct lec_arp_table *entry;
char *tmp; /* FIXME */
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
mesg = (struct atmlec_msg *)skb->data;
tmp = skb->data;
tmp += sizeof(struct atmlec_msg);
pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
switch (mesg->type) {
case l_set_mac_addr:
eth_hw_addr_set(dev, mesg->content.normal.mac_addr);
break;
case l_del_mac_addr:
eth_hw_addr_set(dev, zero_addr);
break;
case l_addr_delete:
lec_addr_delete(priv, mesg->content.normal.atm_addr,
mesg->content.normal.flag);
break;
case l_topology_change:
priv->topology_change = mesg->content.normal.flag;
break;
case l_flush_complete:
lec_flush_complete(priv, mesg->content.normal.flag);
break;
case l_narp_req: /* LANE2: see 7.1.35 in the lane2 spec */
spin_lock_irqsave(&priv->lec_arp_lock, flags);
entry = lec_arp_find(priv, mesg->content.normal.mac_addr);
lec_arp_remove(priv, entry);
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
if (mesg->content.normal.no_source_le_narp)
break;
fallthrough;
case l_arp_update:
lec_arp_update(priv, mesg->content.normal.mac_addr,
mesg->content.normal.atm_addr,
mesg->content.normal.flag,
mesg->content.normal.targetless_le_arp);
pr_debug("in l_arp_update\n");
if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
mesg->sizeoftlvs);
lane2_associate_ind(dev, mesg->content.normal.mac_addr,
tmp, mesg->sizeoftlvs);
}
break;
case l_config:
priv->maximum_unknown_frame_count =
mesg->content.config.maximum_unknown_frame_count;
priv->max_unknown_frame_time =
(mesg->content.config.max_unknown_frame_time * HZ);
priv->max_retry_count = mesg->content.config.max_retry_count;
priv->aging_time = (mesg->content.config.aging_time * HZ);
priv->forward_delay_time =
(mesg->content.config.forward_delay_time * HZ);
priv->arp_response_time =
(mesg->content.config.arp_response_time * HZ);
priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
priv->path_switching_delay =
(mesg->content.config.path_switching_delay * HZ);
priv->lane_version = mesg->content.config.lane_version;
/* LANE2 */
priv->lane2_ops = NULL;
if (priv->lane_version > 1)
priv->lane2_ops = &lane2_ops;
rtnl_lock();
if (dev_set_mtu(dev, mesg->content.config.mtu))
pr_info("%s: change_mtu to %d failed\n",
dev->name, mesg->content.config.mtu);
rtnl_unlock();
priv->is_proxy = mesg->content.config.is_proxy;
break;
case l_flush_tran_id:
lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr,
mesg->content.normal.flag);
break;
case l_set_lecid:
priv->lecid =
(unsigned short)(0xffff & mesg->content.normal.flag);
break;
case l_should_bridge:
#if IS_ENABLED(CONFIG_BRIDGE)
{
pr_debug("%s: bridge zeppelin asks about %pM\n",
dev->name, mesg->content.proxy.mac_addr);
if (br_fdb_test_addr_hook == NULL)
break;
if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
/* hit from bridge table, send LE_ARP_RESPONSE */
struct sk_buff *skb2;
struct sock *sk;
pr_debug("%s: entry found, responding to zeppelin\n",
dev->name);
skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
if (skb2 == NULL)
break;
skb2->len = sizeof(struct atmlec_msg);
skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
atm_force_charge(priv->lecd, skb2->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb2);
sk->sk_data_ready(sk);
}
}
#endif /* IS_ENABLED(CONFIG_BRIDGE) */
break;
default:
pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
dev_kfree_skb(skb);
return -EINVAL;
}
dev_kfree_skb(skb);
return 0;
}
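/*
* The daemon's control VCC went away: stop the queue, tear down the ARP
* cache and drop any control messages still queued on the socket.
*/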
static void lec_atm_close(struct atm_vcc *vcc)
{
struct sk_buff *skb;
struct net_device *dev = (struct net_device *)vcc->proto_data;
struct lec_priv *priv = netdev_priv(dev);
priv->lecd = NULL;
/* Do something needful? */
netif_stop_queue(dev);
lec_arp_destroy(priv);
if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
pr_info("%s closing with messages pending\n", dev->name);
while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
dev_kfree_skb(skb);
}
pr_info("%s: Shut down!\n", dev->name);
module_put(THIS_MODULE);
}
static const struct atmdev_ops lecdev_ops = {
.close = lec_atm_close,
.send = lec_atm_send
};
static struct atm_dev lecatm_dev = {
.ops = &lecdev_ops,
.type = "lec",
.number = 999, /* dummy device number */
.lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock)
};
/*
* LANE2: new argument struct sk_buff *data contains
* the LE_ARP based TLVs introduced in the LANE2 spec
*/
static int
send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
const unsigned char *mac_addr, const unsigned char *atm_addr,
struct sk_buff *data)
{
struct sock *sk;
struct sk_buff *skb;
struct atmlec_msg *mesg;
if (!priv || !priv->lecd)
return -1;
skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
if (!skb)
return -1;
skb->len = sizeof(struct atmlec_msg);
mesg = (struct atmlec_msg *)skb->data;
memset(mesg, 0, sizeof(struct atmlec_msg));
mesg->type = type;
if (data != NULL)
mesg->sizeoftlvs = data->len;
if (mac_addr)
ether_addr_copy(mesg->content.normal.mac_addr, mac_addr);
else
mesg->content.normal.targetless_le_arp = 1;
if (atm_addr)
memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);
atm_force_charge(priv->lecd, skb->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
if (data != NULL) {
pr_debug("about to send %d bytes of data\n", data->len);
atm_force_charge(priv->lecd, data->truesize);
skb_queue_tail(&sk->sk_receive_queue, data);
sk->sk_data_ready(sk);
}
return 0;
}
static void lec_set_multicast_list(struct net_device *dev)
{
/*
* By default, all multicast frames arrive over the BUS.
* Eventually we may support selective multicast service.
*/
}
static const struct net_device_ops lec_netdev_ops = {
.ndo_open = lec_open,
.ndo_stop = lec_close,
.ndo_start_xmit = lec_start_xmit,
.ndo_tx_timeout = lec_tx_timeout,
.ndo_set_rx_mode = lec_set_multicast_list,
};
static const unsigned char lec_ctrl_magic[] = {
0xff,
0x00,
0x01,
0x01
};
#define LEC_DATA_DIRECT_8023 2
#define LEC_DATA_DIRECT_8025 3
static int lec_is_data_direct(struct atm_vcc *vcc)
{
return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
(vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
}
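/*
* ->push handler for VCCs owned by this client: control frames are queued
* to the daemon's socket, data frames lose their LE header and are passed
* up through netif_rx().
*/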
static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
unsigned long flags;
struct net_device *dev = (struct net_device *)vcc->proto_data;
struct lec_priv *priv = netdev_priv(dev);
#if DUMP_PACKETS > 0
printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
dev->name, vcc->vpi, vcc->vci);
#endif
if (!skb) {
pr_debug("%s: null skb\n", dev->name);
lec_vcc_close(priv, vcc);
return;
}
#if DUMP_PACKETS >= 2
#define MAX_SKB_DUMP 99
#elif DUMP_PACKETS >= 1
#define MAX_SKB_DUMP 30
#endif
#if DUMP_PACKETS > 0
printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
dev->name, skb->len, priv->lecid);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, min(MAX_SKB_DUMP, skb->len), true);
#endif /* DUMP_PACKETS > 0 */
if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
/* Control frame, to daemon */
struct sock *sk = sk_atm(vcc);
pr_debug("%s: To daemon\n", dev->name);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
} else { /* Data frame, queue to protocol handlers */
struct lec_arp_table *entry;
unsigned char *src, *dst;
atm_return(vcc, skb->truesize);
if (*(__be16 *) skb->data == htons(priv->lecid) ||
!priv->lecd || !(dev->flags & IFF_UP)) {
/*
* Probably looping back, or lecd is missing
* or has gone down.
*/
pr_debug("Ignoring frame...\n");
dev_kfree_skb(skb);
return;
}
dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
/*
* If this is a Data Direct VCC, and the VCC does not match
* the LE_ARP cache entry, delete the LE_ARP cache entry.
*/
spin_lock_irqsave(&priv->lec_arp_lock, flags);
if (lec_is_data_direct(vcc)) {
src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
entry = lec_arp_find(priv, src);
if (entry && entry->vcc != vcc) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
if (!(dst[0] & 0x01) && /* Never filter Multi/Broadcast */
!priv->is_proxy && /* Proxy wants all the packets */
memcmp(dst, dev->dev_addr, dev->addr_len)) {
dev_kfree_skb(skb);
return;
}
if (!hlist_empty(&priv->lec_arp_empty_ones))
lec_arp_check_empties(priv, vcc, skb);
skb_pull(skb, 2); /* skip lec_id */
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
}
}
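/* ->pop handler: once the VCC can take data again, re-enable the transmit queue. */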
static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
struct net_device *dev = skb->dev;
if (vpriv == NULL) {
pr_info("vpriv = NULL!?!?!?\n");
return;
}
vpriv->old_pop(vcc, skb);
if (vpriv->xoff && atm_may_send(vcc, 0)) {
vpriv->xoff = 0;
if (netif_running(dev) && netif_queue_stopped(dev))
netif_wake_queue(dev);
}
}
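/* ATMLEC_DATA: attach a data VCC to the emulated LAN selected by ioc_data.dev_num. */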
static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
{
struct lec_vcc_priv *vpriv;
int bytes_left;
struct atmlec_ioc ioc_data;
/* Lecd must be up in this case */
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
if (bytes_left != 0)
pr_info("copy from user failed for %d bytes\n", bytes_left);
if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
return -EINVAL;
ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
if (!dev_lec[ioc_data.dev_num])
return -EINVAL;
vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
if (!vpriv)
return -ENOMEM;
vpriv->xoff = 0;
vpriv->old_pop = vcc->pop;
vcc->user_back = vpriv;
vcc->pop = lec_pop;
lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]),
&ioc_data, vcc, vcc->push);
vcc->proto_data = dev_lec[ioc_data.dev_num];
vcc->push = lec_push;
return 0;
}
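/* ATMLEC_MCAST: use this VCC as the multicast (BUS) send VCC for interface 'arg'. */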
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
if (arg < 0 || arg >= MAX_LEC_ITF)
return -EINVAL;
arg = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
}
/* Initialize the LANE client device and attach the daemon's control VCC. */
static int lecd_attach(struct atm_vcc *vcc, int arg)
{
int i;
struct lec_priv *priv;
if (arg < 0)
arg = 0;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
size = sizeof(struct lec_priv);
dev_lec[i] = alloc_etherdev(size);
if (!dev_lec[i])
return -ENOMEM;
dev_lec[i]->netdev_ops = &lec_netdev_ops;
dev_lec[i]->max_mtu = 18190;
snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
if (register_netdev(dev_lec[i])) {
free_netdev(dev_lec[i]);
return -EINVAL;
}
priv = netdev_priv(dev_lec[i]);
} else {
priv = netdev_priv(dev_lec[i]);
if (priv->lecd)
return -EADDRINUSE;
}
lec_arp_init(priv);
priv->itfnum = i; /* LANE2 addition */
priv->lecd = vcc;
vcc->dev = &lecatm_dev;
vcc_insert_socket(sk_atm(vcc));
vcc->proto_data = dev_lec[i];
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
/* Set default values for these variables */
priv->maximum_unknown_frame_count = 1;
priv->max_unknown_frame_time = (1 * HZ);
priv->vcc_timeout_period = (1200 * HZ);
priv->max_retry_count = 1;
priv->aging_time = (300 * HZ);
priv->forward_delay_time = (15 * HZ);
priv->topology_change = 0;
priv->arp_response_time = (1 * HZ);
priv->flush_timeout = (4 * HZ);
priv->path_switching_delay = (6 * HZ);
if (dev_lec[i]->flags & IFF_UP)
netif_start_queue(dev_lec[i]);
__module_get(THIS_MODULE);
return i;
}
#ifdef CONFIG_PROC_FS
static const char *lec_arp_get_status_string(unsigned char status)
{
static const char *const lec_arp_status_string[] = {
"ESI_UNKNOWN ",
"ESI_ARP_PENDING ",
"ESI_VC_PENDING ",
"<Undefined> ",
"ESI_FLUSH_PENDING ",
"ESI_FORWARD_DIRECT"
};
if (status > ESI_FORWARD_DIRECT)
status = 3; /* ESI_UNDEFINED */
return lec_arp_status_string[status];
}
static void lec_info(struct seq_file *seq, struct lec_arp_table *entry)
{
seq_printf(seq, "%pM ", entry->mac_addr);
seq_printf(seq, "%*phN ", ATM_ESA_LEN, entry->atm_addr);
seq_printf(seq, "%s %4.4x", lec_arp_get_status_string(entry->status),
entry->flags & 0xffff);
if (entry->vcc)
seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci);
else
seq_printf(seq, " ");
if (entry->recv_vcc) {
seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi,
entry->recv_vcc->vci);
}
seq_putc(seq, '\n');
}
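/* Iterator state for the /proc/net/atm/lec seq_file. */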
struct lec_state {
unsigned long flags;
struct lec_priv *locked;
struct hlist_node *node;
struct net_device *dev;
int itf;
int arp_table;
int misc_table;
};
static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
loff_t *l)
{
struct hlist_node *e = state->node;
if (!e)
e = tbl->first;
if (e == SEQ_START_TOKEN) {
e = tbl->first;
--*l;
}
for (; e; e = e->next) {
if (--*l < 0)
break;
}
state->node = e;
return (*l < 0) ? state : NULL;
}
static void *lec_arp_walk(struct lec_state *state, loff_t *l,
struct lec_priv *priv)
{
void *v = NULL;
int p;
for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) {
v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l);
if (v)
break;
}
state->arp_table = p;
return v;
}
static void *lec_misc_walk(struct lec_state *state, loff_t *l,
struct lec_priv *priv)
{
struct hlist_head *lec_misc_tables[] = {
&priv->lec_arp_empty_ones,
&priv->lec_no_forward,
&priv->mcast_fwds
};
void *v = NULL;
int q;
for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) {
v = lec_tbl_walk(state, lec_misc_tables[q], l);
if (v)
break;
}
state->misc_table = q;
return v;
}
static void *lec_priv_walk(struct lec_state *state, loff_t *l,
struct lec_priv *priv)
{
if (!state->locked) {
state->locked = priv;
spin_lock_irqsave(&priv->lec_arp_lock, state->flags);
}
if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) {
spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags);
state->locked = NULL;
/* Partial state reset for the next time we get called */
state->arp_table = state->misc_table = 0;
}
return state->locked;
}
static void *lec_itf_walk(struct lec_state *state, loff_t *l)
{
struct net_device *dev;
void *v;
dev = state->dev ? state->dev : dev_lec[state->itf];
v = (dev && netdev_priv(dev)) ?
lec_priv_walk(state, l, netdev_priv(dev)) : NULL;
if (!v && dev) {
dev_put(dev);
/* Partial state reset for the next time we get called */
dev = NULL;
}
state->dev = dev;
return v;
}
static void *lec_get_idx(struct lec_state *state, loff_t l)
{
void *v = NULL;
for (; state->itf < MAX_LEC_ITF; state->itf++) {
v = lec_itf_walk(state, &l);
if (v)
break;
}
return v;
}
static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
{
struct lec_state *state = seq->private;
state->itf = 0;
state->dev = NULL;
state->locked = NULL;
state->arp_table = 0;
state->misc_table = 0;
state->node = SEQ_START_TOKEN;
return *pos ? lec_get_idx(state, *pos) : SEQ_START_TOKEN;
}
static void lec_seq_stop(struct seq_file *seq, void *v)
{
struct lec_state *state = seq->private;
if (state->dev) {
spin_unlock_irqrestore(&state->locked->lec_arp_lock,
state->flags);
dev_put(state->dev);
}
}
static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct lec_state *state = seq->private;
++*pos;
return lec_get_idx(state, 1);
}
static int lec_seq_show(struct seq_file *seq, void *v)
{
static const char lec_banner[] =
"Itf MAC ATM destination"
" Status Flags "
"VPI/VCI Recv VPI/VCI\n";
if (v == SEQ_START_TOKEN)
seq_puts(seq, lec_banner);
else {
struct lec_state *state = seq->private;
struct net_device *dev = state->dev;
struct lec_arp_table *entry = hlist_entry(state->node,
struct lec_arp_table,
next);
seq_printf(seq, "%s ", dev->name);
lec_info(seq, entry);
}
return 0;
}
static const struct seq_operations lec_seq_ops = {
.start = lec_seq_start,
.next = lec_seq_next,
.stop = lec_seq_stop,
.show = lec_seq_show,
};
#endif
static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct atm_vcc *vcc = ATM_SD(sock);
int err = 0;
switch (cmd) {
case ATMLEC_CTRL:
case ATMLEC_MCAST:
case ATMLEC_DATA:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
break;
default:
return -ENOIOCTLCMD;
}
switch (cmd) {
case ATMLEC_CTRL:
err = lecd_attach(vcc, (int)arg);
if (err >= 0)
sock->state = SS_CONNECTED;
break;
case ATMLEC_MCAST:
err = lec_mcast_attach(vcc, (int)arg);
break;
case ATMLEC_DATA:
err = lec_vcc_attach(vcc, (void __user *)arg);
break;
}
return err;
}
static struct atm_ioctl lane_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = lane_ioctl,
};
static int __init lane_module_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *p;
p = proc_create_seq_private("lec", 0444, atm_proc_root, &lec_seq_ops,
sizeof(struct lec_state), NULL);
if (!p) {
pr_err("Unable to initialize /proc/net/atm/lec\n");
return -ENOMEM;
}
#endif
register_atm_ioctl(&lane_ioctl_ops);
pr_info("lec.c: initialized\n");
return 0;
}
static void __exit lane_module_cleanup(void)
{
int i;
#ifdef CONFIG_PROC_FS
remove_proc_entry("lec", atm_proc_root);
#endif
deregister_atm_ioctl(&lane_ioctl_ops);
for (i = 0; i < MAX_LEC_ITF; i++) {
if (dev_lec[i] != NULL) {
unregister_netdev(dev_lec[i]);
free_netdev(dev_lec[i]);
dev_lec[i] = NULL;
}
}
}
module_init(lane_module_init);
module_exit(lane_module_cleanup);
/*
* LANE2: 3.1.3, LE_RESOLVE.request
* In the non-force case this allocates memory and fills in *tlvs and *sizeoftlvs.
* If sizeoftlvs == NULL the default TLVs associated with this
* lec will be used.
* If dst_mac == NULL, a targetless LE_ARP will be sent.
*/
static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
u8 **tlvs, u32 *sizeoftlvs)
{
unsigned long flags;
struct lec_priv *priv = netdev_priv(dev);
struct lec_arp_table *table;
struct sk_buff *skb;
int retval;
if (force == 0) {
spin_lock_irqsave(&priv->lec_arp_lock, flags);
table = lec_arp_find(priv, dst_mac);
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
if (table == NULL)
return -1;
*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
if (*tlvs == NULL)
return -1;
*sizeoftlvs = table->sizeoftlvs;
return 0;
}
if (sizeoftlvs == NULL)
retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL);
else {
skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
if (skb == NULL)
return -1;
skb->len = *sizeoftlvs;
skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
}
return retval;
}
/*
* LANE2: 3.1.4, LE_ASSOCIATE.request
* Associate the *tlvs with the *lan_dst address.
* Will overwrite any previous association
* Returns 1 for success, 0 for failure (out of memory)
*
*/
static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
const u8 *tlvs, u32 sizeoftlvs)
{
int retval;
struct sk_buff *skb;
struct lec_priv *priv = netdev_priv(dev);
if (!ether_addr_equal(lan_dst, dev->dev_addr))
return 0; /* not our mac address */
kfree(priv->tlvs); /* NULL if there was no previous association */
priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
if (priv->tlvs == NULL)
return 0;
priv->sizeoftlvs = sizeoftlvs;
skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
if (skb == NULL)
return 0;
skb->len = sizeoftlvs;
skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
if (retval != 0)
pr_info("lec.c: lane2_associate_req() failed\n");
/*
* If the previous association has changed we must
* somehow notify other LANE entities about the change
*/
return 1;
}
/*
* LANE2: 3.1.5, LE_ASSOCIATE.indication
*
*/
static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
const u8 *tlvs, u32 sizeoftlvs)
{
#if 0
int i = 0;
#endif
struct lec_priv *priv = netdev_priv(dev);
#if 0 /*
* Why have the TLVs in LE_ARP entries
* since we do not use them? When you
* uncomment this code, make sure the
* TLVs get freed when entry is killed
*/
struct lec_arp_table *entry = lec_arp_find(priv, mac_addr);
if (entry == NULL)
return; /* should not happen */
kfree(entry->tlvs);
entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
if (entry->tlvs == NULL)
return;
entry->sizeoftlvs = sizeoftlvs;
#endif
#if 0
pr_info("\n");
pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
while (i < sizeoftlvs)
pr_cont("%02x ", tlvs[i++]);
pr_cont("\n");
#endif
/* tell MPOA about the TLVs we saw */
if (priv->lane2_ops && priv->lane2_ops->associate_indicator) {
priv->lane2_ops->associate_indicator(dev, mac_addr,
tlvs, sizeoftlvs);
}
}
/*
* Here starts what used to be lec_arpc.c
*
* lec_arpc.c was added here when making
* the lane client modular. October 1997
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/param.h>
#include <linux/atomic.h>
#include <linux/inetdevice.h>
#include <net/route.h>
#if 0
#define pr_debug(format, args...)
/*
#define pr_debug printk
*/
#endif
#define DEBUG_ARP_TABLE 0
#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(struct timer_list *t);
/*
* Arp table funcs
*/
#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
/*
* Initialization of arp-cache
*/
static void lec_arp_init(struct lec_priv *priv)
{
unsigned short i;
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
spin_lock_init(&priv->lec_arp_lock);
INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
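/* Detach and release any send/receive VCCs still referenced by this entry. */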
static void lec_arp_clear_vccs(struct lec_arp_table *entry)
{
if (entry->vcc) {
struct atm_vcc *vcc = entry->vcc;
struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
struct net_device *dev = (struct net_device *)vcc->proto_data;
vcc->pop = vpriv->old_pop;
if (vpriv->xoff)
netif_wake_queue(dev);
kfree(vpriv);
vcc->user_back = NULL;
vcc->push = entry->old_push;
vcc_release_async(vcc, -EPIPE);
entry->vcc = NULL;
}
if (entry->recv_vcc) {
struct atm_vcc *vcc = entry->recv_vcc;
struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
kfree(vpriv);
vcc->user_back = NULL;
entry->recv_vcc->push = entry->old_recv_push;
vcc_release_async(entry->recv_vcc, -EPIPE);
entry->recv_vcc = NULL;
}
}
/*
* Insert entry to lec_arp_table
* LANE2: Add to the end of the list to satisfy 8.1.13
*/
static inline void
lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
{
struct hlist_head *tmp;
tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
hlist_add_head(&entry->next, tmp);
pr_debug("Added entry:%pM\n", entry->mac_addr);
}
/*
* Remove entry from lec_arp_table
*/
static int
lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
{
struct lec_arp_table *entry;
int i, remove_vcc = 1;
if (!to_remove)
return -1;
hlist_del(&to_remove->next);
del_timer(&to_remove->timer);
/*
* If this is the only MAC connected to this VCC,
* also tear down the VCC
*/
if (to_remove->status >= ESI_FLUSH_PENDING) {
/*
* ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
*/
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (memcmp(to_remove->atm_addr,
entry->atm_addr, ATM_ESA_LEN) == 0) {
remove_vcc = 0;
break;
}
}
}
if (remove_vcc)
lec_arp_clear_vccs(to_remove);
}
skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
return 0;
}
#if DEBUG_ARP_TABLE
static const char *get_status_string(unsigned char st)
{
switch (st) {
case ESI_UNKNOWN:
return "ESI_UNKNOWN";
case ESI_ARP_PENDING:
return "ESI_ARP_PENDING";
case ESI_VC_PENDING:
return "ESI_VC_PENDING";
case ESI_FLUSH_PENDING:
return "ESI_FLUSH_PENDING";
case ESI_FORWARD_DIRECT:
return "ESI_FORWARD_DIRECT";
}
return "<UNKNOWN>";
}
static void dump_arp_table(struct lec_priv *priv)
{
struct lec_arp_table *rulla;
char buf[256];
int i, offset;
pr_info("Dump %p:\n", priv);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(rulla,
&priv->lec_arp_tables[i], next) {
offset = 0;
offset += sprintf(buf, "%d: %p\n", i, rulla);
offset += sprintf(buf + offset, "Mac: %pM ",
rulla->mac_addr);
offset += sprintf(buf + offset, "Atm: %*ph ", ATM_ESA_LEN,
rulla->atm_addr);
offset += sprintf(buf + offset,
"Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
rulla->vcc ? rulla->vcc->vpi : 0,
rulla->vcc ? rulla->vcc->vci : 0,
rulla->recv_vcc ? rulla->recv_vcc->
vpi : 0,
rulla->recv_vcc ? rulla->recv_vcc->
vci : 0, rulla->last_used,
rulla->timestamp, rulla->no_tries);
offset +=
sprintf(buf + offset,
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
pr_info("%s\n", buf);
}
}
if (!hlist_empty(&priv->lec_no_forward))
pr_info("No forward\n");
hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM ", rulla->mac_addr);
offset += sprintf(buf + offset, "Atm: %*ph ", ATM_ESA_LEN,
rulla->atm_addr);
offset += sprintf(buf + offset,
"Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
rulla->vcc ? rulla->vcc->vpi : 0,
rulla->vcc ? rulla->vcc->vci : 0,
rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
rulla->last_used,
rulla->timestamp, rulla->no_tries);
offset += sprintf(buf + offset,
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
pr_info("%s\n", buf);
}
if (!hlist_empty(&priv->lec_arp_empty_ones))
pr_info("Empty ones\n");
hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM ", rulla->mac_addr);
offset += sprintf(buf + offset, "Atm: %*ph ", ATM_ESA_LEN,
rulla->atm_addr);
offset += sprintf(buf + offset,
"Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
rulla->vcc ? rulla->vcc->vpi : 0,
rulla->vcc ? rulla->vcc->vci : 0,
rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
rulla->last_used,
rulla->timestamp, rulla->no_tries);
offset += sprintf(buf + offset,
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
pr_info("%s", buf);
}
if (!hlist_empty(&priv->mcast_fwds))
pr_info("Multicast Forward VCCs\n");
hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM ", rulla->mac_addr);
offset += sprintf(buf + offset, "Atm: %*ph ", ATM_ESA_LEN,
rulla->atm_addr);
offset += sprintf(buf + offset,
"Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
rulla->vcc ? rulla->vcc->vpi : 0,
rulla->vcc ? rulla->vcc->vci : 0,
rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
rulla->last_used,
rulla->timestamp, rulla->no_tries);
offset += sprintf(buf + offset,
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
pr_info("%s\n", buf);
}
}
#else
#define dump_arp_table(priv) do { } while (0)
#endif
/*
* Destruction of arp-cache
*/
static void lec_arp_destroy(struct lec_priv *priv)
{
unsigned long flags;
struct hlist_node *next;
struct lec_arp_table *entry;
int i;
cancel_delayed_work_sync(&priv->lec_arp_work);
/*
* Remove all entries
*/
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
}
INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
}
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
lec_arp_put(entry);
}
INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
hlist_for_each_entry_safe(entry, next,
&priv->lec_no_forward, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
lec_arp_put(entry);
}
INIT_HLIST_HEAD(&priv->lec_no_forward);
hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
lec_arp_put(entry);
}
INIT_HLIST_HEAD(&priv->mcast_fwds);
priv->mcast_vcc = NULL;
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}
/*
* Find entry by mac_address
*/
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
const unsigned char *mac_addr)
{
struct hlist_head *head;
struct lec_arp_table *entry;
pr_debug("%pM\n", mac_addr);
head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
hlist_for_each_entry(entry, head, next) {
if (ether_addr_equal(mac_addr, entry->mac_addr))
return entry;
}
return NULL;
}
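/* Allocate and initialize a new LE_ARP cache entry for mac_addr. */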
static struct lec_arp_table *make_entry(struct lec_priv *priv,
const unsigned char *mac_addr)
{
struct lec_arp_table *to_return;
to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
if (!to_return)
return NULL;
ether_addr_copy(to_return->mac_addr, mac_addr);
INIT_HLIST_NODE(&to_return->next);
timer_setup(&to_return->timer, lec_arp_expire_arp, 0);
to_return->last_used = jiffies;
to_return->priv = priv;
skb_queue_head_init(&to_return->tx_wait);
refcount_set(&to_return->usage, 1);
return to_return;
}
/* Arp sent timer expired */
static void lec_arp_expire_arp(struct timer_list *t)
{
struct lec_arp_table *entry;
entry = from_timer(entry, t, timer);
pr_debug("\n");
if (entry->status == ESI_ARP_PENDING) {
if (entry->no_tries <= entry->priv->max_retry_count) {
if (entry->is_rdesc)
send_to_lecd(entry->priv, l_rdesc_arp_xmt,
entry->mac_addr, NULL, NULL);
else
send_to_lecd(entry->priv, l_arp_xmt,
entry->mac_addr, NULL, NULL);
entry->no_tries++;
}
mod_timer(&entry->timer, jiffies + (1 * HZ));
}
}
/* Unknown/unused vcc expire, remove associated entry */
static void lec_arp_expire_vcc(struct timer_list *t)
{
unsigned long flags;
struct lec_arp_table *to_remove = from_timer(to_remove, t, timer);
struct lec_priv *priv = to_remove->priv;
del_timer(&to_remove->timer);
pr_debug("%p %p: vpi:%d vci:%d\n",
to_remove, priv,
to_remove->recv_vcc ? to_remove->recv_vcc->vpi : 0,
to_remove->recv_vcc ? to_remove->recv_vcc->vci : 0);
spin_lock_irqsave(&priv->lec_arp_lock, flags);
hlist_del(&to_remove->next);
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
lec_arp_clear_vccs(to_remove);
lec_arp_put(to_remove);
}
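/*
* Helper for lec_arp_check_expire(). Returns true when the flush period
* for this entry has ended; the caller must then drop the lock, drain the
* entry's tx_wait queue and mark it ESI_FORWARD_DIRECT.
*/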
static bool __lec_arp_check_expire(struct lec_arp_table *entry,
unsigned long now,
struct lec_priv *priv)
{
unsigned long time_to_check;
if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
time_to_check = priv->forward_delay_time;
else
time_to_check = priv->aging_time;
pr_debug("About to expire: %lx - %lx > %lx\n",
now, entry->last_used, time_to_check);
if (time_after(now, entry->last_used + time_to_check) &&
!(entry->flags & LEC_PERMANENT_FLAG) &&
!(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
/* Remove entry */
pr_debug("Entry timed out\n");
lec_arp_remove(priv, entry);
lec_arp_put(entry);
} else {
/* Something else */
if ((entry->status == ESI_VC_PENDING ||
entry->status == ESI_ARP_PENDING) &&
time_after_eq(now, entry->timestamp +
priv->max_unknown_frame_time)) {
entry->timestamp = jiffies;
entry->packets_flooded = 0;
if (entry->status == ESI_VC_PENDING)
send_to_lecd(priv, l_svc_setup,
entry->mac_addr,
entry->atm_addr,
NULL);
}
if (entry->status == ESI_FLUSH_PENDING &&
time_after_eq(now, entry->timestamp +
priv->path_switching_delay)) {
lec_arp_hold(entry);
return true;
}
}
return false;
}
/*
* Expire entries.
* 1. Re-set timer
* 2. For each entry, delete entries that have aged past the age limit.
* 3. For each entry, depending on the status of the entry, perform
* the following maintenance.
* a. If status is ESI_VC_PENDING or ESI_ARP_PENDING then if the
* tick_count is above the max_unknown_frame_time, clear
* the tick_count to zero and clear the packets_flooded counter
* to zero. This supports the packet rate limit per address
* while flooding unknowns.
* b. If the status is ESI_FLUSH_PENDING and the tick_count is greater
* than or equal to the path_switching_delay, change the status
* to ESI_FORWARD_DIRECT. This causes the flush period to end
* regardless of the progress of the flush protocol.
*/
static void lec_arp_check_expire(struct work_struct *work)
{
unsigned long flags;
struct lec_priv *priv =
container_of(work, struct lec_priv, lec_arp_work.work);
struct hlist_node *next;
struct lec_arp_table *entry;
unsigned long now;
int i;
pr_debug("%p\n", priv);
now = jiffies;
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (__lec_arp_check_expire(entry, now, priv)) {
struct sk_buff *skb;
struct atm_vcc *vcc = entry->vcc;
spin_unlock_irqrestore(&priv->lec_arp_lock,
flags);
while ((skb = skb_dequeue(&entry->tx_wait)))
lec_send(vcc, skb);
entry->last_used = jiffies;
entry->status = ESI_FORWARD_DIRECT;
lec_arp_put(entry);
goto restart;
}
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
/*
* Try to find vcc where mac_address is attached.
*
*/
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
const unsigned char *mac_to_find,
int is_rdesc,
struct lec_arp_table **ret_entry)
{
unsigned long flags;
struct lec_arp_table *entry;
struct atm_vcc *found;
if (mac_to_find[0] & 0x01) {
switch (priv->lane_version) {
case 1:
return priv->mcast_vcc;
case 2: /* LANE2 wants arp for multicast addresses */
if (ether_addr_equal(mac_to_find, bus_mac))
return priv->mcast_vcc;
break;
default:
break;
}
}
spin_lock_irqsave(&priv->lec_arp_lock, flags);
entry = lec_arp_find(priv, mac_to_find);
if (entry) {
if (entry->status == ESI_FORWARD_DIRECT) {
/* Connection Ok */
entry->last_used = jiffies;
lec_arp_hold(entry);
*ret_entry = entry;
found = entry->vcc;
goto out;
}
/*
* If the LE_ARP cache entry is still pending, reset count to 0
* so another LE_ARP request can be made for this frame.
*/
if (entry->status == ESI_ARP_PENDING)
entry->no_tries = 0;
/*
* Data direct VC not yet set up, check to see if the unknown
* frame count is greater than the limit. If the limit has
* not been reached, allow the caller to send packet to
* BUS.
*/
if (entry->status != ESI_FLUSH_PENDING &&
entry->packets_flooded <
priv->maximum_unknown_frame_count) {
entry->packets_flooded++;
pr_debug("Flooding..\n");
found = priv->mcast_vcc;
goto out;
}
/*
* We got here because entry->status == ESI_FLUSH_PENDING
* or BUS flood limit was reached for an entry which is
* in ESI_ARP_PENDING or ESI_VC_PENDING state.
*/
lec_arp_hold(entry);
*ret_entry = entry;
pr_debug("entry->status %d entry->vcc %p\n", entry->status,
entry->vcc);
found = NULL;
} else {
/* No matching entry was found */
entry = make_entry(priv, mac_to_find);
pr_debug("Making entry\n");
if (!entry) {
found = priv->mcast_vcc;
goto out;
}
lec_arp_add(priv, entry);
/* We want arp-request(s) to be sent */
entry->packets_flooded = 1;
entry->status = ESI_ARP_PENDING;
entry->no_tries = 1;
entry->last_used = entry->timestamp = jiffies;
entry->is_rdesc = is_rdesc;
if (entry->is_rdesc)
send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL,
NULL);
else
send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
entry->timer.expires = jiffies + (1 * HZ);
entry->timer.function = lec_arp_expire_arp;
add_timer(&entry->timer);
found = priv->mcast_vcc;
}
out:
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
return found;
}
static int
lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
unsigned long permanent)
{
unsigned long flags;
struct hlist_node *next;
struct lec_arp_table *entry;
int i;
pr_debug("\n");
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
(permanent ||
!(entry->flags & LEC_PERMANENT_FLAG))) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
return 0;
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
return -1;
}
/*
* Notifies: Response to arp_request (atm_addr != NULL)
*/
static void
lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
const unsigned char *atm_addr, unsigned long remoteflag,
unsigned int targetless_le_arp)
{
unsigned long flags;
struct hlist_node *next;
struct lec_arp_table *entry, *tmp;
int i;
pr_debug("%smac:%pM\n",
(targetless_le_arp) ? "targetless " : "", mac_addr);
spin_lock_irqsave(&priv->lec_arp_lock, flags);
entry = lec_arp_find(priv, mac_addr);
if (entry == NULL && targetless_le_arp)
goto out; /*
* LANE2: ignore targetless LE_ARPs for which
* we have no entry in the cache. 7.1.30
*/
if (!hlist_empty(&priv->lec_arp_empty_ones)) {
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
hlist_del(&entry->next);
del_timer(&entry->timer);
tmp = lec_arp_find(priv, mac_addr);
if (tmp) {
del_timer(&tmp->timer);
tmp->status = ESI_FORWARD_DIRECT;
memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN);
tmp->vcc = entry->vcc;
tmp->old_push = entry->old_push;
tmp->last_used = jiffies;
del_timer(&entry->timer);
lec_arp_put(entry);
entry = tmp;
} else {
entry->status = ESI_FORWARD_DIRECT;
ether_addr_copy(entry->mac_addr,
mac_addr);
entry->last_used = jiffies;
lec_arp_add(priv, entry);
}
if (remoteflag)
entry->flags |= LEC_REMOTE_FLAG;
else
entry->flags &= ~LEC_REMOTE_FLAG;
pr_debug("After update\n");
dump_arp_table(priv);
goto out;
}
}
}
entry = lec_arp_find(priv, mac_addr);
if (!entry) {
entry = make_entry(priv, mac_addr);
if (!entry)
goto out;
entry->status = ESI_UNKNOWN;
lec_arp_add(priv, entry);
/* Temporary, changes before end of function */
}
memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
del_timer(&entry->timer);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(tmp,
&priv->lec_arp_tables[i], next) {
if (entry != tmp &&
!memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
/* Vcc to this host exists */
if (tmp->status > ESI_VC_PENDING) {
/*
* ESI_FLUSH_PENDING,
* ESI_FORWARD_DIRECT
*/
entry->vcc = tmp->vcc;
entry->old_push = tmp->old_push;
}
entry->status = tmp->status;
break;
}
}
}
if (remoteflag)
entry->flags |= LEC_REMOTE_FLAG;
else
entry->flags &= ~LEC_REMOTE_FLAG;
if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) {
entry->status = ESI_VC_PENDING;
send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL);
}
pr_debug("After update2\n");
dump_arp_table(priv);
out:
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}
/*
* Notifies: Vcc setup ready
*/
static void
lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
struct atm_vcc *vcc,
void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
{
unsigned long flags;
struct lec_arp_table *entry;
int i, found_entry = 0;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
/* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
if (ioc_data->receive == 2) {
pr_debug("LEC_ARP: Attaching mcast forward\n");
#if 0
entry = lec_arp_find(priv, bus_mac);
if (!entry) {
pr_info("LEC_ARP: Multicast entry not found!\n");
goto out;
}
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
entry->recv_vcc = vcc;
entry->old_recv_push = old_push;
#endif
entry = make_entry(priv, bus_mac);
if (entry == NULL)
goto out;
del_timer(&entry->timer);
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
entry->recv_vcc = vcc;
entry->old_recv_push = old_push;
hlist_add_head(&entry->next, &priv->mcast_fwds);
goto out;
} else if (ioc_data->receive == 1) {
/*
* This is a vcc which we don't want to make the default vcc;
* attach it anyway.
*/
pr_debug("LEC_ARP:Attaching data direct, not default: %*phN\n",
ATM_ESA_LEN, ioc_data->atm_addr);
entry = make_entry(priv, bus_mac);
if (entry == NULL)
goto out;
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
eth_zero_addr(entry->mac_addr);
entry->recv_vcc = vcc;
entry->old_recv_push = old_push;
entry->status = ESI_UNKNOWN;
entry->timer.expires = jiffies + priv->vcc_timeout_period;
entry->timer.function = lec_arp_expire_vcc;
hlist_add_head(&entry->next, &priv->lec_no_forward);
add_timer(&entry->timer);
dump_arp_table(priv);
goto out;
}
pr_debug("LEC_ARP:Attaching data direct, default: %*phN\n",
ATM_ESA_LEN, ioc_data->atm_addr);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (memcmp
(ioc_data->atm_addr, entry->atm_addr,
ATM_ESA_LEN) == 0) {
pr_debug("LEC_ARP: Attaching data direct\n");
pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
entry->vcc ? entry->vcc->vci : 0,
entry->recv_vcc ? entry->recv_vcc->
vci : 0);
found_entry = 1;
del_timer(&entry->timer);
entry->vcc = vcc;
entry->old_push = old_push;
if (entry->status == ESI_VC_PENDING) {
if (priv->maximum_unknown_frame_count
== 0)
entry->status =
ESI_FORWARD_DIRECT;
else {
entry->timestamp = jiffies;
entry->status =
ESI_FLUSH_PENDING;
#if 0
send_to_lecd(priv, l_flush_xmt,
NULL,
entry->atm_addr,
NULL);
#endif
}
} else {
/*
* They were forming a connection
* to us, and we to them. Our
* ATM address is numerically lower
* than theirs, so we make the connection
* we formed into the default VCC (8.1.11).
* The connection they made gets torn
* down. This might confuse some
* clients. Can be changed if
* someone reports trouble...
*/
;
}
}
}
}
if (found_entry) {
pr_debug("After vcc was added\n");
dump_arp_table(priv);
goto out;
}
/*
* Not found, snatch address from first data packet that arrives
* from this vcc
*/
entry = make_entry(priv, bus_mac);
if (!entry)
goto out;
entry->vcc = vcc;
entry->old_push = old_push;
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
eth_zero_addr(entry->mac_addr);
entry->status = ESI_UNKNOWN;
hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
entry->timer.expires = jiffies + priv->vcc_timeout_period;
entry->timer.function = lec_arp_expire_vcc;
add_timer(&entry->timer);
pr_debug("After vcc was added\n");
dump_arp_table(priv);
out:
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
{
unsigned long flags;
struct lec_arp_table *entry;
int i;
pr_debug("%lx\n", tran_id);
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (entry->flush_tran_id == tran_id &&
entry->status == ESI_FLUSH_PENDING) {
struct sk_buff *skb;
struct atm_vcc *vcc = entry->vcc;
lec_arp_hold(entry);
spin_unlock_irqrestore(&priv->lec_arp_lock,
flags);
while ((skb = skb_dequeue(&entry->tx_wait)))
lec_send(vcc, skb);
entry->last_used = jiffies;
entry->status = ESI_FORWARD_DIRECT;
lec_arp_put(entry);
pr_debug("LEC_ARP: Flushed\n");
goto restart;
}
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
dump_arp_table(priv);
}
static void
lec_set_flush_tran_id(struct lec_priv *priv,
const unsigned char *atm_addr, unsigned long tran_id)
{
unsigned long flags;
struct lec_arp_table *entry;
int i;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
entry->flush_tran_id = tran_id;
pr_debug("Set flush transaction id to %lx for %p\n",
tran_id, entry);
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}
static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
{
unsigned long flags;
unsigned char mac_addr[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
struct lec_arp_table *to_add;
struct lec_vcc_priv *vpriv;
int err = 0;
vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
if (!vpriv)
return -ENOMEM;
vpriv->xoff = 0;
vpriv->old_pop = vcc->pop;
vcc->user_back = vpriv;
vcc->pop = lec_pop;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
to_add = make_entry(priv, mac_addr);
if (!to_add) {
vcc->pop = vpriv->old_pop;
kfree(vpriv);
err = -ENOMEM;
goto out;
}
memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN);
to_add->status = ESI_FORWARD_DIRECT;
to_add->flags |= LEC_PERMANENT_FLAG;
to_add->vcc = vcc;
to_add->old_push = vcc->push;
vcc->push = lec_push;
priv->mcast_vcc = vcc;
lec_arp_add(priv, to_add);
out:
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
return err;
}
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
{
unsigned long flags;
struct hlist_node *next;
struct lec_arp_table *entry;
int i;
pr_debug("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n", vcc->vpi, vcc->vci);
dump_arp_table(priv);
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (vcc == entry->vcc) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
if (priv->mcast_vcc == vcc)
priv->mcast_vcc = NULL;
}
}
}
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (entry->vcc == vcc) {
lec_arp_clear_vccs(entry);
del_timer(&entry->timer);
hlist_del(&entry->next);
lec_arp_put(entry);
}
}
hlist_for_each_entry_safe(entry, next,
&priv->lec_no_forward, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
del_timer(&entry->timer);
hlist_del(&entry->next);
lec_arp_put(entry);
}
}
hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
hlist_del(&entry->next);
lec_arp_put(entry);
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
dump_arp_table(priv);
}
static void
lec_arp_check_empties(struct lec_priv *priv,
struct atm_vcc *vcc, struct sk_buff *skb)
{
unsigned long flags;
struct hlist_node *next;
struct lec_arp_table *entry, *tmp;
struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
unsigned char *src = hdr->h_source;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (vcc == entry->vcc) {
del_timer(&entry->timer);
ether_addr_copy(entry->mac_addr, src);
entry->status = ESI_FORWARD_DIRECT;
entry->last_used = jiffies;
/* We might have got an entry */
tmp = lec_arp_find(priv, src);
if (tmp) {
lec_arp_remove(priv, tmp);
lec_arp_put(tmp);
}
hlist_del(&entry->next);
lec_arp_add(priv, entry);
goto out;
}
}
pr_debug("LEC_ARP: Arp_check_empties: entry not found!\n");
out:
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}
MODULE_LICENSE("GPL");
| linux-master | net/atm/lec.c |
// SPDX-License-Identifier: GPL-2.0
/* ATM ioctl handling */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/* 2003 John Levon <[email protected]> */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/atmclip.h> /* CLIP_*ENCAP */
#include <linux/atmarp.h> /* manifest constants */
#include <linux/capability.h>
#include <linux/sonet.h> /* for ioctls */
#include <linux/atmsvc.h>
#include <linux/atmmpc.h>
#include <net/atmclip.h>
#include <linux/atmlec.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <net/compat.h>
#include "resources.h"
#include "signaling.h" /* for WAITING and sigd_attach */
#include "common.h"
static DEFINE_MUTEX(ioctl_mutex);
static LIST_HEAD(ioctl_list);
void register_atm_ioctl(struct atm_ioctl *ioctl)
{
mutex_lock(&ioctl_mutex);
list_add_tail(&ioctl->list, &ioctl_list);
mutex_unlock(&ioctl_mutex);
}
EXPORT_SYMBOL(register_atm_ioctl);
void deregister_atm_ioctl(struct atm_ioctl *ioctl)
{
mutex_lock(&ioctl_mutex);
list_del(&ioctl->list);
mutex_unlock(&ioctl_mutex);
}
EXPORT_SYMBOL(deregister_atm_ioctl);
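/*
 * Illustrative sketch (not part of the original source): how a backend module
 * hooks into the dispatch loop in do_vcc_ioctl() below.  The shape mirrors
 * br2684_ioctl_ops further down in this file set; the names here are
 * hypothetical.
 */
#if 0
static int example_backend_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* handle only this backend's commands; everything else must return
 * -ENOIOCTLCMD so the next registered handler gets a chance */
return -ENOIOCTLCMD;
}
static struct atm_ioctl example_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = example_backend_ioctl,
};
/* module init:  register_atm_ioctl(&example_ioctl_ops);   */
/* module exit:  deregister_atm_ioctl(&example_ioctl_ops); */
#endif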
static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg, int compat)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
int error;
struct list_head *pos;
void __user *argp = (void __user *)arg;
void __user *buf;
int __user *len;
vcc = ATM_SD(sock);
switch (cmd) {
case SIOCOUTQ:
if (sock->state != SS_CONNECTED ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EINVAL;
goto done;
}
error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
(int __user *)argp) ? -EFAULT : 0;
goto done;
case SIOCINQ:
{
struct sk_buff *skb;
if (sock->state != SS_CONNECTED) {
error = -EINVAL;
goto done;
}
skb = skb_peek(&sk->sk_receive_queue);
error = put_user(skb ? skb->len : 0,
(int __user *)argp) ? -EFAULT : 0;
goto done;
}
case ATM_SETSC:
net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
current->comm, task_pid_nr(current));
error = 0;
goto done;
case ATMSIGD_CTRL:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
/*
* The user/kernel protocol for exchanging signalling
* info uses kernel pointers as opaque references,
* so the holder of the file descriptor can scribble
* on the kernel... so we should make sure that we
* have the same privileges that /proc/kcore needs
*/
if (!capable(CAP_SYS_RAWIO)) {
error = -EPERM;
goto done;
}
#ifdef CONFIG_COMPAT
/* WTF? I don't even want to _think_ about making this
work for 32-bit userspace. TBH I don't really want
to think about it at all. dwmw2. */
if (compat) {
net_warn_ratelimited("32-bit task cannot be atmsigd\n");
error = -EINVAL;
goto done;
}
#endif
error = sigd_attach(vcc);
if (!error)
sock->state = SS_CONNECTED;
goto done;
case ATM_SETBACKEND:
case ATM_NEWBACKENDIF:
{
atm_backend_t backend;
error = get_user(backend, (atm_backend_t __user *)argp);
if (error)
goto done;
switch (backend) {
case ATM_BACKEND_PPP:
request_module("pppoatm");
break;
case ATM_BACKEND_BR2684:
request_module("br2684");
break;
}
break;
}
case ATMMPC_CTRL:
case ATMMPC_DATA:
request_module("mpoa");
break;
case ATMARPD_CTRL:
request_module("clip");
break;
case ATMLEC_CTRL:
request_module("lec");
break;
}
error = -ENOIOCTLCMD;
mutex_lock(&ioctl_mutex);
list_for_each(pos, &ioctl_list) {
struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
if (try_module_get(ic->owner)) {
error = ic->ioctl(sock, cmd, arg);
module_put(ic->owner);
if (error != -ENOIOCTLCMD)
break;
}
}
mutex_unlock(&ioctl_mutex);
if (error != -ENOIOCTLCMD)
goto done;
if (cmd == ATM_GETNAMES) {
if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
struct compat_atm_iobuf __user *ciobuf = argp;
compat_uptr_t cbuf;
len = &ciobuf->length;
if (get_user(cbuf, &ciobuf->buffer))
return -EFAULT;
buf = compat_ptr(cbuf);
#endif
} else {
struct atm_iobuf __user *iobuf = argp;
len = &iobuf->length;
if (get_user(buf, &iobuf->buffer))
return -EFAULT;
}
error = atm_getnames(buf, len);
} else {
int number;
if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
struct compat_atmif_sioc __user *csioc = argp;
compat_uptr_t carg;
len = &csioc->length;
if (get_user(carg, &csioc->arg))
return -EFAULT;
buf = compat_ptr(carg);
if (get_user(number, &csioc->number))
return -EFAULT;
#endif
} else {
struct atmif_sioc __user *sioc = argp;
len = &sioc->length;
if (get_user(buf, &sioc->arg))
return -EFAULT;
if (get_user(number, &sioc->number))
return -EFAULT;
}
error = atm_dev_ioctl(cmd, buf, len, number, compat);
}
done:
return error;
}
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
return do_vcc_ioctl(sock, cmd, arg, 0);
}
#ifdef CONFIG_COMPAT
/*
* FIXME:
* The compat_ioctl handling is duplicated, using both these conversion
* routines and the compat argument to the actual handlers. Both
* versions are somewhat incomplete and should be merged, e.g. by
* moving the ioctl number translation into the actual handlers and
* killing the conversion code.
*
* -arnd, November 2009
*/
#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc)
#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf)
#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc)
#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc)
#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc)
#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc)
#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc)
#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc)
#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc)
#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc)
#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc)
#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc)
#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc)
#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc)
#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc)
#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc)
#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc)
static struct {
unsigned int cmd32;
unsigned int cmd;
} atm_ioctl_map[] = {
{ ATM_GETLINKRATE32, ATM_GETLINKRATE },
{ ATM_GETNAMES32, ATM_GETNAMES },
{ ATM_GETTYPE32, ATM_GETTYPE },
{ ATM_GETESI32, ATM_GETESI },
{ ATM_GETADDR32, ATM_GETADDR },
{ ATM_RSTADDR32, ATM_RSTADDR },
{ ATM_ADDADDR32, ATM_ADDADDR },
{ ATM_DELADDR32, ATM_DELADDR },
{ ATM_GETCIRANGE32, ATM_GETCIRANGE },
{ ATM_SETCIRANGE32, ATM_SETCIRANGE },
{ ATM_SETESI32, ATM_SETESI },
{ ATM_SETESIF32, ATM_SETESIF },
{ ATM_GETSTAT32, ATM_GETSTAT },
{ ATM_GETSTATZ32, ATM_GETSTATZ },
{ ATM_GETLOOP32, ATM_GETLOOP },
{ ATM_SETLOOP32, ATM_SETLOOP },
{ ATM_QUERYLOOP32, ATM_QUERYLOOP },
};
#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
static int do_atm_iobuf(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct compat_atm_iobuf __user *iobuf32 = compat_ptr(arg);
u32 data;
if (get_user(data, &iobuf32->buffer))
return -EFAULT;
return atm_getnames(&iobuf32->length, compat_ptr(data));
}
static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct compat_atmif_sioc __user *sioc32 = compat_ptr(arg);
int number;
u32 data;
if (get_user(data, &sioc32->arg) || get_user(number, &sioc32->number))
return -EFAULT;
return atm_dev_ioctl(cmd, compat_ptr(data), &sioc32->length, number, 0);
}
static int do_atm_ioctl(struct socket *sock, unsigned int cmd32,
unsigned long arg)
{
int i;
unsigned int cmd = 0;
switch (cmd32) {
case SONET_GETSTAT:
case SONET_GETSTATZ:
case SONET_GETDIAG:
case SONET_SETDIAG:
case SONET_CLRDIAG:
case SONET_SETFRAMING:
case SONET_GETFRAMING:
case SONET_GETFRSENSE:
return do_atmif_sioc(sock, cmd32, arg);
}
for (i = 0; i < NR_ATM_IOCTL; i++) {
if (cmd32 == atm_ioctl_map[i].cmd32) {
cmd = atm_ioctl_map[i].cmd;
break;
}
}
if (i == NR_ATM_IOCTL)
return -EINVAL;
switch (cmd) {
case ATM_GETNAMES:
return do_atm_iobuf(sock, cmd, arg);
case ATM_GETLINKRATE:
case ATM_GETTYPE:
case ATM_GETESI:
case ATM_GETADDR:
case ATM_RSTADDR:
case ATM_ADDADDR:
case ATM_DELADDR:
case ATM_GETCIRANGE:
case ATM_SETCIRANGE:
case ATM_SETESI:
case ATM_SETESIF:
case ATM_GETSTAT:
case ATM_GETSTATZ:
case ATM_GETLOOP:
case ATM_SETLOOP:
case ATM_QUERYLOOP:
return do_atmif_sioc(sock, cmd, arg);
}
return -EINVAL;
}
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
int ret;
ret = do_vcc_ioctl(sock, cmd, arg, 1);
if (ret != -ENOIOCTLCMD)
return ret;
return do_atm_ioctl(sock, cmd, arg);
}
#endif
| linux-master | net/atm/ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/* ATM driver model support. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/atmdev.h>
#include "common.h"
#include "resources.h"
#define to_atm_dev(cldev) container_of(cldev, struct atm_dev, class_dev)
static ssize_t type_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
}
static ssize_t address_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
}
static ssize_t atmaddress_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
unsigned long flags;
struct atm_dev *adev = to_atm_dev(cdev);
struct atm_dev_addr *aaddr;
int count = 0;
spin_lock_irqsave(&adev->lock, flags);
list_for_each_entry(aaddr, &adev->local, entry) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"%1phN.%2phN.%10phN.%6phN.%1phN\n",
&aaddr->addr.sas_addr.prv[0],
&aaddr->addr.sas_addr.prv[1],
&aaddr->addr.sas_addr.prv[3],
&aaddr->addr.sas_addr.prv[13],
&aaddr->addr.sas_addr.prv[19]);
}
spin_unlock_irqrestore(&adev->lock, flags);
return count;
}
static ssize_t atmindex_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
}
static ssize_t carrier_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
}
static ssize_t link_rate_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
int link_rate;
/* show the link rate, not the data rate */
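/*
 * An ATM cell is 53 bytes, so the default branch below converts a peak
 * cell rate in cells/s to bits/s as link_rate * 8 * 53; the OC-3, OC-12
 * and 25.6 Mbit/s PHYs are special-cased to report their nominal line
 * rate instead.
 */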
switch (adev->link_rate) {
case ATM_OC3_PCR:
link_rate = 155520000;
break;
case ATM_OC12_PCR:
link_rate = 622080000;
break;
case ATM_25_PCR:
link_rate = 25600000;
break;
default:
link_rate = adev->link_rate * 8 * 53;
}
return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
}
static DEVICE_ATTR_RO(address);
static DEVICE_ATTR_RO(atmaddress);
static DEVICE_ATTR_RO(atmindex);
static DEVICE_ATTR_RO(carrier);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(link_rate);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
&dev_attr_address,
&dev_attr_atmindex,
&dev_attr_carrier,
&dev_attr_type,
&dev_attr_link_rate,
NULL
};
static int atm_uevent(const struct device *cdev, struct kobj_uevent_env *env)
{
const struct atm_dev *adev;
if (!cdev)
return -ENODEV;
adev = to_atm_dev(cdev);
if (!adev)
return -ENODEV;
if (add_uevent_var(env, "NAME=%s%d", adev->type, adev->number))
return -ENOMEM;
return 0;
}
static void atm_release(struct device *cdev)
{
struct atm_dev *adev = to_atm_dev(cdev);
kfree(adev);
}
static struct class atm_class = {
.name = "atm",
.dev_release = atm_release,
.dev_uevent = atm_uevent,
};
int atm_register_sysfs(struct atm_dev *adev, struct device *parent)
{
struct device *cdev = &adev->class_dev;
int i, j, err;
cdev->class = &atm_class;
cdev->parent = parent;
dev_set_drvdata(cdev, adev);
dev_set_name(cdev, "%s%d", adev->type, adev->number);
err = device_register(cdev);
if (err < 0)
return err;
for (i = 0; atm_attrs[i]; i++) {
err = device_create_file(cdev, atm_attrs[i]);
if (err)
goto err_out;
}
return 0;
err_out:
for (j = 0; j < i; j++)
device_remove_file(cdev, atm_attrs[j]);
device_del(cdev);
return err;
}
void atm_unregister_sysfs(struct atm_dev *adev)
{
struct device *cdev = &adev->class_dev;
device_del(cdev);
}
int __init atm_sysfs_init(void)
{
return class_register(&atm_class);
}
void __exit atm_sysfs_exit(void)
{
class_unregister(&atm_class);
}
| linux-master | net/atm/atm_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/resources.c - Statically allocated resources */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/* Fixes
* Arnaldo Carvalho de Melo <[email protected]>
* 2002/01 - don't free the whole struct sock on sk->destruct time,
* use the default destruct function initialized by sock_init_data */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/kernel.h> /* for barrier */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/sock.h> /* for struct sock */
#include "common.h"
#include "resources.h"
#include "addr.h"
LIST_HEAD(atm_devs);
DEFINE_MUTEX(atm_dev_mutex);
static struct atm_dev *__alloc_atm_dev(const char *type)
{
struct atm_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->type = type;
dev->signal = ATM_PHY_SIG_UNKNOWN;
dev->link_rate = ATM_OC3_PCR;
spin_lock_init(&dev->lock);
INIT_LIST_HEAD(&dev->local);
INIT_LIST_HEAD(&dev->lecs);
return dev;
}
static struct atm_dev *__atm_dev_lookup(int number)
{
struct atm_dev *dev;
list_for_each_entry(dev, &atm_devs, dev_list) {
if (dev->number == number) {
atm_dev_hold(dev);
return dev;
}
}
return NULL;
}
struct atm_dev *atm_dev_lookup(int number)
{
struct atm_dev *dev;
mutex_lock(&atm_dev_mutex);
dev = __atm_dev_lookup(number);
mutex_unlock(&atm_dev_mutex);
return dev;
}
EXPORT_SYMBOL(atm_dev_lookup);
struct atm_dev *atm_dev_register(const char *type, struct device *parent,
const struct atmdev_ops *ops, int number,
unsigned long *flags)
{
struct atm_dev *dev, *inuse;
dev = __alloc_atm_dev(type);
if (!dev) {
pr_err("no space for dev %s\n", type);
return NULL;
}
mutex_lock(&atm_dev_mutex);
if (number != -1) {
inuse = __atm_dev_lookup(number);
if (inuse) {
atm_dev_put(inuse);
mutex_unlock(&atm_dev_mutex);
kfree(dev);
return NULL;
}
dev->number = number;
} else {
dev->number = 0;
while ((inuse = __atm_dev_lookup(dev->number))) {
atm_dev_put(inuse);
dev->number++;
}
}
dev->ops = ops;
if (flags)
dev->flags = *flags;
else
memset(&dev->flags, 0, sizeof(dev->flags));
memset(&dev->stats, 0, sizeof(dev->stats));
refcount_set(&dev->refcnt, 1);
if (atm_proc_dev_register(dev) < 0) {
pr_err("atm_proc_dev_register failed for dev %s\n", type);
goto out_fail;
}
if (atm_register_sysfs(dev, parent) < 0) {
pr_err("atm_register_sysfs failed for dev %s\n", type);
atm_proc_dev_deregister(dev);
goto out_fail;
}
list_add_tail(&dev->dev_list, &atm_devs);
out:
mutex_unlock(&atm_dev_mutex);
return dev;
out_fail:
kfree(dev);
dev = NULL;
goto out;
}
EXPORT_SYMBOL(atm_dev_register);
void atm_dev_deregister(struct atm_dev *dev)
{
BUG_ON(test_bit(ATM_DF_REMOVED, &dev->flags));
set_bit(ATM_DF_REMOVED, &dev->flags);
/*
* if we remove the current device from the atm_devs list, a new device
* with the same number can appear, so we need to deregister its proc
* entry, release all vccs asynchronously and remove them from the vccs
* list too
*/
mutex_lock(&atm_dev_mutex);
list_del(&dev->dev_list);
mutex_unlock(&atm_dev_mutex);
atm_dev_release_vccs(dev);
atm_unregister_sysfs(dev);
atm_proc_dev_deregister(dev);
atm_dev_put(dev);
}
EXPORT_SYMBOL(atm_dev_deregister);
static void copy_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
static void subtract_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
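/*
 * Illustrative expansion (not part of the original source): __AAL_STAT_ITEMS
 * is an X-macro list from <linux/atmdev.h>.  Assuming it covers the same
 * counters that add_stats() prints in net/atm/proc.c (tx, tx_err, rx, rx_err,
 * rx_drop), copy_aal_stats() above expands to roughly:
 *
 * to->tx      = atomic_read(&from->tx);
 * to->tx_err  = atomic_read(&from->tx_err);
 * to->rx      = atomic_read(&from->rx);
 * to->rx_err  = atomic_read(&from->rx_err);
 * to->rx_drop = atomic_read(&from->rx_drop);
 */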
static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
int zero)
{
struct atm_dev_stats tmp;
int error = 0;
copy_aal_stats(&dev->stats.aal0, &tmp.aal0);
copy_aal_stats(&dev->stats.aal34, &tmp.aal34);
copy_aal_stats(&dev->stats.aal5, &tmp.aal5);
if (arg)
error = copy_to_user(arg, &tmp, sizeof(tmp));
if (zero && !error) {
subtract_aal_stats(&dev->stats.aal0, &tmp.aal0);
subtract_aal_stats(&dev->stats.aal34, &tmp.aal34);
subtract_aal_stats(&dev->stats.aal5, &tmp.aal5);
}
return error ? -EFAULT : 0;
}
int atm_getnames(void __user *buf, int __user *iobuf_len)
{
int error, len, size = 0;
struct atm_dev *dev;
struct list_head *p;
int *tmp_buf, *tmp_p;
if (get_user(len, iobuf_len))
return -EFAULT;
mutex_lock(&atm_dev_mutex);
list_for_each(p, &atm_devs)
size += sizeof(int);
if (size > len) {
mutex_unlock(&atm_dev_mutex);
return -E2BIG;
}
tmp_buf = kmalloc(size, GFP_ATOMIC);
if (!tmp_buf) {
mutex_unlock(&atm_dev_mutex);
return -ENOMEM;
}
tmp_p = tmp_buf;
list_for_each_entry(dev, &atm_devs, dev_list) {
*tmp_p++ = dev->number;
}
mutex_unlock(&atm_dev_mutex);
error = ((copy_to_user(buf, tmp_buf, size)) ||
put_user(size, iobuf_len))
? -EFAULT : 0;
kfree(tmp_buf);
return error;
}
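/*
 * Userspace-side sketch (illustrative only, not part of the original source):
 * ATM_GETNAMES uses the length/buffer protocol implemented above.  This
 * assumes the uapi layout of struct atm_iobuf (an int length plus a buffer
 * pointer), which is what do_vcc_ioctl() in net/atm/ioctl.c dereferences.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/atmdev.h>
static void example_list_atm_devices(int fd)
{
int itf[32];
struct atm_iobuf req = {
.length = sizeof(itf), /* in: buffer size in bytes */
.buffer = itf, /* out: one int per registered device */
};
if (ioctl(fd, ATM_GETNAMES, &req) == 0) {
int i, n = req.length / sizeof(int); /* length is updated to bytes written */
for (i = 0; i < n; i++)
printf("ATM device number %d\n", itf[i]);
}
}
#endif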
int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
int number, int compat)
{
int error, len, size = 0;
struct atm_dev *dev;
if (get_user(len, sioc_len))
return -EFAULT;
dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
number);
if (!dev)
return -ENODEV;
switch (cmd) {
case ATM_GETTYPE:
size = strlen(dev->type) + 1;
if (copy_to_user(buf, dev->type, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_GETESI:
size = ESI_LEN;
if (copy_to_user(buf, dev->esi, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_SETESI:
{
int i;
for (i = 0; i < ESI_LEN; i++)
if (dev->esi[i]) {
error = -EEXIST;
goto done;
}
}
fallthrough;
case ATM_SETESIF:
{
unsigned char esi[ESI_LEN];
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
if (copy_from_user(esi, buf, ESI_LEN)) {
error = -EFAULT;
goto done;
}
memcpy(dev->esi, esi, ESI_LEN);
error = ESI_LEN;
goto done;
}
case ATM_GETSTATZ:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
fallthrough;
case ATM_GETSTAT:
size = sizeof(struct atm_dev_stats);
error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
if (error)
goto done;
break;
case ATM_GETCIRANGE:
size = sizeof(struct atm_cirange);
if (copy_to_user(buf, &dev->ci_range, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_GETLINKRATE:
size = sizeof(int);
if (copy_to_user(buf, &dev->link_rate, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_RSTADDR:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
atm_reset_addr(dev, ATM_ADDR_LOCAL);
break;
case ATM_ADDADDR:
case ATM_DELADDR:
case ATM_ADDLECSADDR:
case ATM_DELLECSADDR:
{
struct sockaddr_atmsvc addr;
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
if (copy_from_user(&addr, buf, sizeof(addr))) {
error = -EFAULT;
goto done;
}
if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
error = atm_add_addr(dev, &addr,
(cmd == ATM_ADDADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
else
error = atm_del_addr(dev, &addr,
(cmd == ATM_DELADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
goto done;
}
case ATM_GETADDR:
case ATM_GETLECSADDR:
error = atm_get_addr(dev, buf, len,
(cmd == ATM_GETADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
if (error < 0)
goto done;
size = error;
/* may return 0, but later on size == 0 means "don't
write the length" */
error = put_user(size, sioc_len) ? -EFAULT : 0;
goto done;
case ATM_SETLOOP:
if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
__ATM_LM_XTLOC((int) (unsigned long) buf) >
__ATM_LM_XTRMT((int) (unsigned long) buf)) {
error = -EINVAL;
goto done;
}
fallthrough;
case ATM_SETCIRANGE:
case SONET_GETSTATZ:
case SONET_SETDIAG:
case SONET_CLRDIAG:
case SONET_SETFRAMING:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
fallthrough;
default:
if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
if (!dev->ops->compat_ioctl) {
error = -EINVAL;
goto done;
}
size = dev->ops->compat_ioctl(dev, cmd, buf);
#endif
} else {
if (!dev->ops->ioctl) {
error = -EINVAL;
goto done;
}
size = dev->ops->ioctl(dev, cmd, buf);
}
if (size < 0) {
error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
goto done;
}
}
if (size)
error = put_user(size, sioc_len) ? -EFAULT : 0;
else
error = 0;
done:
atm_dev_put(dev);
return error;
}
#ifdef CONFIG_PROC_FS
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&atm_dev_mutex);
return seq_list_start_head(&atm_devs, *pos);
}
void atm_dev_seq_stop(struct seq_file *seq, void *v)
{
mutex_unlock(&atm_dev_mutex);
}
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &atm_devs, pos);
}
#endif
| linux-master | net/atm/resources.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/atm_misc.c - Various functions for use by ATM drivers */
/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
#include <linux/module.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/skbuff.h>
#include <linux/sonet.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/atomic.h>
int atm_charge(struct atm_vcc *vcc, int truesize)
{
atm_force_charge(vcc, truesize);
if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
return 1;
atm_return(vcc, truesize);
atomic_inc(&vcc->stats->rx_drop);
return 0;
}
EXPORT_SYMBOL(atm_charge);
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
gfp_t gfp_flags)
{
struct sock *sk = sk_atm(vcc);
int guess = SKB_TRUESIZE(pdu_size);
atm_force_charge(vcc, guess);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
if (skb) {
atomic_add(skb->truesize-guess,
&sk->sk_rmem_alloc);
return skb;
}
}
atm_return(vcc, guess);
atomic_inc(&vcc->stats->rx_drop);
return NULL;
}
EXPORT_SYMBOL(atm_alloc_charge);
/*
* atm_pcr_goal returns the positive PCR if it should be rounded up, the
* negative PCR if it should be rounded down, and zero if the maximum available
* bandwidth should be used.
*
* The rules are as follows (* = maximum, - = absent (0), x = value "x",
* x+ = x or next value above x, x- = x or next value below):
*
* min  max  pcr  result           min  max  pcr  result
* -    -    -    *  (UBR only)    x    -    -    x+
* -    -    *    *                x    -    *    *
* -    -    z    z-               x    -    z    z-
* -    *    -    *                x    *    -    x+
* -    *    *    *                x    *    *    *
* -    *    z    z-               x    *    z    z-
* -    y    -    y-               x    y    -    x+
* -    y    *    y-               x    y    *    y-
* -    y    z    z-               x    y    z    z-
*
* All non-error cases can be converted with the following simple set of rules:
*
* if pcr == z then z-
* else if min == x && pcr == - then x+
* else if max == y then y-
* else *
*/
int atm_pcr_goal(const struct atm_trafprm *tp)
{
if (tp->pcr && tp->pcr != ATM_MAX_PCR)
return -tp->pcr;
if (tp->min_pcr && !tp->pcr)
return tp->min_pcr;
if (tp->max_pcr != ATM_MAX_PCR)
return -tp->max_pcr;
return 0;
}
EXPORT_SYMBOL(atm_pcr_goal);
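/*
 * Worked examples (illustrative, not part of the original source), applying
 * the rule table above to atm_pcr_goal():
 *
 * .pcr = z (0 < z < ATM_MAX_PCR)                       ->  -z  (round down to z-)
 * .pcr = 0, .min_pcr = x (x != 0)                      ->  +x  (round up to x+)
 * .pcr = 0, .min_pcr = 0, .max_pcr = y (0 < y < MAX)   ->  -y  (round down to y-)
 * .pcr = 0, .min_pcr = 0, .max_pcr = ATM_MAX_PCR or 0  ->   0  (use maximum available)
 */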
void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
EXPORT_SYMBOL(sonet_copy_stats);
void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
EXPORT_SYMBOL(sonet_subtract_stats);
| linux-master | net/atm/atm_misc.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/proc.c - ATM /proc interface
*
* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA
*
* seq_file api usage by [email protected]
*
* Evaluating the efficiency of the whole thing is left as an exercise to
* the reader.
*/
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/netdevice.h>
#include <linux/atmclip.h>
#include <linux/init.h> /* for __init */
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/atmclip.h>
#include <linux/uaccess.h>
#include <linux/param.h> /* for HZ */
#include <linux/atomic.h>
#include "resources.h"
#include "common.h" /* atm_proc_init prototype */
#include "signaling.h" /* to get sigd - ugly too */
static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
size_t count, loff_t *pos);
static const struct proc_ops atm_dev_proc_ops = {
.proc_read = proc_dev_atm_read,
.proc_lseek = noop_llseek,
};
static void add_stats(struct seq_file *seq, const char *aal,
const struct k_atm_aal_stats *stats)
{
seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
atomic_read(&stats->tx), atomic_read(&stats->tx_err),
atomic_read(&stats->rx), atomic_read(&stats->rx_err),
atomic_read(&stats->rx_drop));
}
static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
{
int i;
seq_printf(seq, "%3d %-8s", dev->number, dev->type);
for (i = 0; i < ESI_LEN; i++)
seq_printf(seq, "%02x", dev->esi[i]);
seq_puts(seq, " ");
add_stats(seq, "0", &dev->stats.aal0);
seq_puts(seq, " ");
add_stats(seq, "5", &dev->stats.aal5);
seq_printf(seq, "\t[%d]", refcount_read(&dev->refcnt));
seq_putc(seq, '\n');
}
struct vcc_state {
int bucket;
struct sock *sk;
};
static inline int compare_family(struct sock *sk, int family)
{
return !family || (sk->sk_family == family);
}
static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)
{
struct sock *sk = *sock;
if (sk == SEQ_START_TOKEN) {
for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
struct hlist_head *head = &vcc_hash[*bucket];
sk = hlist_empty(head) ? NULL : __sk_head(head);
if (sk)
break;
}
l--;
}
try_again:
for (; sk; sk = sk_next(sk)) {
l -= compare_family(sk, family);
if (l < 0)
goto out;
}
if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
sk = sk_head(&vcc_hash[*bucket]);
goto try_again;
}
sk = SEQ_START_TOKEN;
out:
*sock = sk;
return (l < 0);
}
static inline void *vcc_walk(struct seq_file *seq, loff_t l)
{
struct vcc_state *state = seq->private;
int family = (uintptr_t)(pde_data(file_inode(seq->file)));
return __vcc_walk(&state->sk, family, &state->bucket, l) ?
state : NULL;
}
static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(vcc_sklist_lock)
{
struct vcc_state *state = seq->private;
loff_t left = *pos;
read_lock(&vcc_sklist_lock);
state->sk = SEQ_START_TOKEN;
return left ? vcc_walk(seq, left) : SEQ_START_TOKEN;
}
static void vcc_seq_stop(struct seq_file *seq, void *v)
__releases(vcc_sklist_lock)
{
read_unlock(&vcc_sklist_lock);
}
static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = vcc_walk(seq, 1);
(*pos)++;
return v;
}
static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
static const char *const class_name[] = {
"off", "UBR", "CBR", "VBR", "ABR"};
static const char *const aal_name[] = {
"---", "1", "2", "3/4", /* 0- 3 */
"???", "5", "???", "???", /* 4- 7 */
"???", "???", "???", "???", /* 8-11 */
"???", "0", "???", "???"}; /* 12-15 */
seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
vcc->dev->number, vcc->vpi, vcc->vci,
vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
class_name[vcc->qos.rxtp.traffic_class],
vcc->qos.txtp.min_pcr,
class_name[vcc->qos.txtp.traffic_class]);
if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
struct net_device *dev;
dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : NULL;
seq_printf(seq, "CLIP, Itf:%s, Encap:",
dev ? dev->name : "none?");
seq_printf(seq, "%s", clip_vcc->encap ? "LLC/SNAP" : "None");
}
seq_putc(seq, '\n');
}
static const char *vcc_state(struct atm_vcc *vcc)
{
static const char *const map[] = { ATM_VS2TXT_MAP };
return map[ATM_VF2VS(vcc->flags)];
}
static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
struct sock *sk = sk_atm(vcc);
seq_printf(seq, "%pK ", vcc);
if (!vcc->dev)
seq_printf(seq, "Unassigned ");
else
seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
vcc->vci);
switch (sk->sk_family) {
case AF_ATMPVC:
seq_printf(seq, "PVC");
break;
case AF_ATMSVC:
seq_printf(seq, "SVC");
break;
default:
seq_printf(seq, "%3d", sk->sk_family);
}
seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
vcc->flags, sk->sk_err,
sk_wmem_alloc_get(sk), sk->sk_sndbuf,
sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
refcount_read(&sk->sk_refcnt));
}
static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
if (!vcc->dev)
seq_printf(seq, sizeof(void *) == 4 ?
"N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
else
seq_printf(seq, "%3d %3d %5d ",
vcc->dev->number, vcc->vpi, vcc->vci);
seq_printf(seq, "%-10s ", vcc_state(vcc));
seq_printf(seq, "%s%s", vcc->remote.sas_addr.pub,
*vcc->remote.sas_addr.pub && *vcc->remote.sas_addr.prv ? "+" : "");
if (*vcc->remote.sas_addr.prv) {
int i;
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(seq, "%02x", vcc->remote.sas_addr.prv[i]);
}
seq_putc(seq, '\n');
}
static int atm_dev_seq_show(struct seq_file *seq, void *v)
{
static char atm_dev_banner[] =
"Itf Type ESI/\"MAC\"addr "
"AAL(TX,err,RX,err,drop) ... [refcnt]\n";
if (v == &atm_devs)
seq_puts(seq, atm_dev_banner);
else {
struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
atm_dev_info(seq, dev);
}
return 0;
}
static const struct seq_operations atm_dev_seq_ops = {
.start = atm_dev_seq_start,
.next = atm_dev_seq_next,
.stop = atm_dev_seq_stop,
.show = atm_dev_seq_show,
};
static int pvc_seq_show(struct seq_file *seq, void *v)
{
static char atm_pvc_banner[] =
"Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n";
if (v == SEQ_START_TOKEN)
seq_puts(seq, atm_pvc_banner);
else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
pvc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations pvc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = pvc_seq_show,
};
static int vcc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq, sizeof(void *) == 4 ? "%-8s%s" : "%-16s%s",
"Address ", "Itf VPI VCI Fam Flags Reply "
"Send buffer Recv buffer [refcnt]\n");
} else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
vcc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations vcc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = vcc_seq_show,
};
static int svc_seq_show(struct seq_file *seq, void *v)
{
static const char atm_svc_banner[] =
"Itf VPI VCI State Remote\n";
if (v == SEQ_START_TOKEN)
seq_puts(seq, atm_svc_banner);
else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
svc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations svc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = svc_seq_show,
};
static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct atm_dev *dev;
unsigned long page;
int length;
if (count == 0)
return 0;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
dev = pde_data(file_inode(file));
if (!dev->ops->proc_read)
length = -EINVAL;
else {
length = dev->ops->proc_read(dev, pos, (char *)page);
if (length > count)
length = -EINVAL;
}
if (length >= 0) {
if (copy_to_user(buf, (char *)page, length))
length = -EFAULT;
(*pos)++;
}
free_page(page);
return length;
}
struct proc_dir_entry *atm_proc_root;
EXPORT_SYMBOL(atm_proc_root);
int atm_proc_dev_register(struct atm_dev *dev)
{
int error;
/* No proc info */
if (!dev->ops->proc_read)
return 0;
error = -ENOMEM;
dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
if (!dev->proc_name)
goto err_out;
dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
&atm_dev_proc_ops, dev);
if (!dev->proc_entry)
goto err_free_name;
return 0;
err_free_name:
kfree(dev->proc_name);
err_out:
return error;
}
void atm_proc_dev_deregister(struct atm_dev *dev)
{
if (!dev->ops->proc_read)
return;
remove_proc_entry(dev->proc_name, atm_proc_root);
kfree(dev->proc_name);
}
int __init atm_proc_init(void)
{
atm_proc_root = proc_net_mkdir(&init_net, "atm", init_net.proc_net);
if (!atm_proc_root)
return -ENOMEM;
proc_create_seq("devices", 0444, atm_proc_root, &atm_dev_seq_ops);
proc_create_seq_private("pvc", 0444, atm_proc_root, &pvc_seq_ops,
sizeof(struct vcc_state), (void *)(uintptr_t)PF_ATMPVC);
proc_create_seq_private("svc", 0444, atm_proc_root, &svc_seq_ops,
sizeof(struct vcc_state), (void *)(uintptr_t)PF_ATMSVC);
proc_create_seq_private("vc", 0444, atm_proc_root, &vcc_seq_ops,
sizeof(struct vcc_state), NULL);
return 0;
}
void atm_proc_exit(void)
{
remove_proc_subtree("atm", init_net.proc_net);
}
| linux-master | net/atm/proc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ethernet netdevice using ATM AAL5 as underlying carrier
* (RFC1483 obsoleted by RFC2684) for Linux
*
* Authors: Marcell GAL, 2000, XDSL Ltd, Hungary
* Eric Kinzie, 2006-2007, US Naval Research Laboratory
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/atmbr2684.h>
#include "common.h"
static void skb_debug(const struct sk_buff *skb)
{
#ifdef SKB_DEBUG
#define NUM2PRINT 50
print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_PREFIX_OFFSET,
16, 1, skb->data, min(NUM2PRINT, skb->len), true);
#endif
}
#define BR2684_ETHERTYPE_LEN 2
#define BR2684_PAD_LEN 2
#define LLC 0xaa, 0xaa, 0x03
#define SNAP_BRIDGED 0x00, 0x80, 0xc2
#define SNAP_ROUTED 0x00, 0x00, 0x00
#define PID_ETHERNET 0x00, 0x07
#define ETHERTYPE_IPV4 0x08, 0x00
#define ETHERTYPE_IPV6 0x86, 0xdd
#define PAD_BRIDGED 0x00, 0x00
static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
static const unsigned char llc_oui_pid_pad[] =
{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
static const unsigned char pad[] = { PAD_BRIDGED };
static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
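/*
 * Illustrative sketch (not part of the original source): the byte sequences
 * the macros above assemble, i.e. what br2684_xmit_vcc() below prepends to
 * each frame.  The field names are taken from RFC 2684.
 */
#if 0
static const unsigned char example_bridged_llc_hdr[10] = {
0xaa, 0xaa, 0x03, /* LLC */
0x00, 0x80, 0xc2, /* SNAP OUI: bridged (IEEE 802.1) */
0x00, 0x07, /* PID: Ethernet, FCS not preserved */
0x00, 0x00, /* 2-byte pad before the MAC frame */
};
static const unsigned char example_routed_ipv4_llc_hdr[8] = {
0xaa, 0xaa, 0x03, /* LLC */
0x00, 0x00, 0x00, /* SNAP OUI: routed */
0x08, 0x00, /* EtherType: IPv4 */
};
#endif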
enum br2684_encaps {
e_vc = BR2684_ENCAPS_VC,
e_llc = BR2684_ENCAPS_LLC,
};
struct br2684_vcc {
struct atm_vcc *atmvcc;
struct net_device *device;
/* keep old push, pop functions for chaining */
void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_release_cb)(struct atm_vcc *vcc);
struct module *old_owner;
enum br2684_encaps encaps;
struct list_head brvccs;
#ifdef CONFIG_ATM_BR2684_IPFILTER
struct br2684_filter filter;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
unsigned int copies_needed, copies_failed;
atomic_t qspace;
};
struct br2684_dev {
struct net_device *net_dev;
struct list_head br2684_devs;
int number;
struct list_head brvccs; /* one device <=> one vcc (before xmas) */
int mac_was_set;
enum br2684_payload payload;
};
/*
* This lock should be held for writing any time the list of devices or
* their attached vcc's could be altered. It should be held for reading
* any time these are being queried. Note that we sometimes need to
* do read-locking in interrupt context, so write locking must block
* the current CPU's interrupts.
*/
static DEFINE_RWLOCK(devs_lock);
static LIST_HEAD(br2684_devs);
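/*
 * Illustrative sketch (not part of the original source) of the locking rule
 * stated above: readers in process context may take the plain read lock,
 * readers that can run in atomic context save irqs (see atm_dev_event()),
 * and writers always use the _irq variants (see br2684_regvcc() and
 * br2684_close_vcc()).
 */
#if 0
read_lock(&devs_lock); /* process-context reader */
/* ... walk br2684_devs ... */
read_unlock(&devs_lock);
write_lock_irq(&devs_lock); /* writers block local irqs */
/* ... add/remove devices or vccs ... */
write_unlock_irq(&devs_lock);
#endif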
static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
{
return netdev_priv(net_dev);
}
static inline struct net_device *list_entry_brdev(const struct list_head *le)
{
return list_entry(le, struct br2684_dev, br2684_devs)->net_dev;
}
static inline struct br2684_vcc *BR2684_VCC(const struct atm_vcc *atmvcc)
{
return (struct br2684_vcc *)(atmvcc->user_back);
}
static inline struct br2684_vcc *list_entry_brvcc(const struct list_head *le)
{
return list_entry(le, struct br2684_vcc, brvccs);
}
/* Caller should hold read_lock(&devs_lock) */
static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
{
struct list_head *lh;
struct net_device *net_dev;
switch (s->method) {
case BR2684_FIND_BYNUM:
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
if (BRPRIV(net_dev)->number == s->spec.devnum)
return net_dev;
}
break;
case BR2684_FIND_BYIFNAME:
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
if (!strncmp(net_dev->name, s->spec.ifname, IFNAMSIZ))
return net_dev;
}
break;
}
return NULL;
}
static int atm_dev_event(struct notifier_block *this, unsigned long event,
void *arg)
{
struct atm_dev *atm_dev = arg;
struct list_head *lh;
struct net_device *net_dev;
struct br2684_vcc *brvcc;
struct atm_vcc *atm_vcc;
unsigned long flags;
pr_debug("event=%ld dev=%p\n", event, atm_dev);
read_lock_irqsave(&devs_lock, flags);
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
list_for_each_entry(brvcc, &BRPRIV(net_dev)->brvccs, brvccs) {
atm_vcc = brvcc->atmvcc;
if (atm_vcc && brvcc->atmvcc->dev == atm_dev) {
if (atm_vcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
else
netif_carrier_on(net_dev);
}
}
}
read_unlock_irqrestore(&devs_lock, flags);
return NOTIFY_DONE;
}
static struct notifier_block atm_dev_notifier = {
.notifier_call = atm_dev_event,
};
/* chained vcc->pop function. Check if we should wake the netif_queue */
static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct br2684_vcc *brvcc = BR2684_VCC(vcc);
pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
brvcc->old_pop(vcc, skb);
/* If the queue space just went up from zero, wake */
if (atomic_inc_return(&brvcc->qspace) == 1)
netif_wake_queue(brvcc->device);
}
/*
* Send a packet out a particular vcc. Not too useful right now, but paves
* the way for multiple vcc's per itf. Returns true if we can send,
* otherwise false
*/
static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
struct br2684_vcc *brvcc)
{
struct br2684_dev *brdev = BRPRIV(dev);
struct atm_vcc *atmvcc;
int minheadroom = (brvcc->encaps == e_llc) ?
((brdev->payload == p_bridged) ?
sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
if (skb_headroom(skb) < minheadroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
brvcc->copies_needed++;
dev_kfree_skb(skb);
if (skb2 == NULL) {
brvcc->copies_failed++;
return 0;
}
skb = skb2;
}
if (brvcc->encaps == e_llc) {
if (brdev->payload == p_bridged) {
skb_push(skb, sizeof(llc_oui_pid_pad));
skb_copy_to_linear_data(skb, llc_oui_pid_pad,
sizeof(llc_oui_pid_pad));
} else if (brdev->payload == p_routed) {
unsigned short prot = ntohs(skb->protocol);
skb_push(skb, sizeof(llc_oui_ipv4));
switch (prot) {
case ETH_P_IP:
skb_copy_to_linear_data(skb, llc_oui_ipv4,
sizeof(llc_oui_ipv4));
break;
case ETH_P_IPV6:
skb_copy_to_linear_data(skb, llc_oui_ipv6,
sizeof(llc_oui_ipv6));
break;
default:
dev_kfree_skb(skb);
return 0;
}
}
} else { /* e_vc */
if (brdev->payload == p_bridged) {
skb_push(skb, 2);
memset(skb->data, 0, 2);
}
}
skb_debug(skb);
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
atm_account_tx(atmvcc, skb);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
if (atomic_dec_return(&brvcc->qspace) < 1) {
/* No more please! */
netif_stop_queue(brvcc->device);
/* We might have raced with br2684_pop() */
if (unlikely(atomic_read(&brvcc->qspace) > 0))
netif_wake_queue(brvcc->device);
}
/* If this fails immediately, the skb will be freed and br2684_pop()
will wake the queue if appropriate. Just return an error so that
the stats are updated correctly */
return !atmvcc->send(atmvcc, skb);
}
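/*
 * Distilled sketch (illustrative, not part of the original source) of the
 * qspace flow control shared by br2684_xmit_vcc() above and br2684_pop():
 * qspace starts at 2 when the VCC is attached (see br2684_regvcc() below),
 * is decremented for every packet handed to the ATM layer and incremented
 * again when the driver's TX-done path calls the chained pop.
 */
#if 0
/* transmit side: stop the netdev queue once both slots are in flight */
if (atomic_dec_return(&brvcc->qspace) < 1)
netif_stop_queue(brvcc->device);
/* TX-done side: wake the queue when space goes from 0 back to 1 */
if (atomic_inc_return(&brvcc->qspace) == 1)
netif_wake_queue(brvcc->device);
#endif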
static void br2684_release_cb(struct atm_vcc *atmvcc)
{
struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
if (atomic_read(&brvcc->qspace) > 0)
netif_wake_queue(brvcc->device);
if (brvcc->old_release_cb)
brvcc->old_release_cb(atmvcc);
}
static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
const struct br2684_dev *brdev)
{
return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
}
static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct br2684_dev *brdev = BRPRIV(dev);
struct br2684_vcc *brvcc;
struct atm_vcc *atmvcc;
netdev_tx_t ret = NETDEV_TX_OK;
pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
read_lock(&devs_lock);
brvcc = pick_outgoing_vcc(skb, brdev);
if (brvcc == NULL) {
pr_debug("no vcc attached to dev %s\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
/* netif_stop_queue(dev); */
dev_kfree_skb(skb);
goto out_devs;
}
atmvcc = brvcc->atmvcc;
bh_lock_sock(sk_atm(atmvcc));
if (test_bit(ATM_VF_RELEASED, &atmvcc->flags) ||
test_bit(ATM_VF_CLOSE, &atmvcc->flags) ||
!test_bit(ATM_VF_READY, &atmvcc->flags)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
goto out;
}
if (sock_owned_by_user(sk_atm(atmvcc))) {
netif_stop_queue(brvcc->device);
ret = NETDEV_TX_BUSY;
goto out;
}
if (!br2684_xmit_vcc(skb, dev, brvcc)) {
/*
* We should probably use netif_*_queue() here, but that
* involves added complication. We need to walk before
* we can run.
*
* Don't free here! This pointer might no longer be valid!
*/
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
}
out:
bh_unlock_sock(sk_atm(atmvcc));
out_devs:
read_unlock(&devs_lock);
return ret;
}
/*
* We remember when the MAC gets set, so we don't override it later with
* the ESI of the ATM card of the first VC
*/
static int br2684_mac_addr(struct net_device *dev, void *p)
{
int err = eth_mac_addr(dev, p);
if (!err)
BRPRIV(dev)->mac_was_set = 1;
return err;
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
/* this IOCTL is experimental. */
static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
{
struct br2684_vcc *brvcc;
struct br2684_filter_set fs;
if (copy_from_user(&fs, arg, sizeof fs))
return -EFAULT;
if (fs.ifspec.method != BR2684_FIND_BYNOTHING) {
/*
* This is really a per-vcc thing, but we can also search
* by device.
*/
struct br2684_dev *brdev;
read_lock(&devs_lock);
brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
if (brdev == NULL || list_empty(&brdev->brvccs) ||
brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
brvcc = NULL;
else
brvcc = list_entry_brvcc(brdev->brvccs.next);
read_unlock(&devs_lock);
if (brvcc == NULL)
return -ESRCH;
} else
brvcc = BR2684_VCC(atmvcc);
memcpy(&brvcc->filter, &fs.filter, sizeof(brvcc->filter));
return 0;
}
/* Returns 1 if packet should be dropped */
static inline int
packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
{
if (brvcc->filter.netmask == 0)
return 0; /* no filter in place */
if (type == htons(ETH_P_IP) &&
(((struct iphdr *)(skb->data))->daddr & brvcc->filter.
netmask) == brvcc->filter.prefix)
return 0;
if (type == htons(ETH_P_ARP))
return 0;
/*
* TODO: we should probably filter ARPs too.. don't want to have
* them returning values that don't make sense, or is that ok?
*/
return 1; /* drop */
}
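/*
 * Worked example (illustrative, not part of the original source): with
 * brvcc->filter.prefix = 192.168.1.0 and .netmask = 255.255.255.0 (both in
 * network byte order), the test above accepts ARP plus IPv4 frames whose
 * destination satisfies (daddr & 255.255.255.0) == 192.168.1.0 and drops
 * everything else arriving on this VCC.  A netmask of 0 disables filtering.
 */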
#endif /* CONFIG_ATM_BR2684_IPFILTER */
static void br2684_close_vcc(struct br2684_vcc *brvcc)
{
pr_debug("removing VCC %p from dev %p\n", brvcc, brvcc->device);
write_lock_irq(&devs_lock);
list_del(&brvcc->brvccs);
write_unlock_irq(&devs_lock);
brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */
brvcc->atmvcc->release_cb = brvcc->old_release_cb;
brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */
module_put(brvcc->old_owner);
kfree(brvcc);
}
/* when AAL5 PDU comes in: */
static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
struct net_device *net_dev = brvcc->device;
struct br2684_dev *brdev = BRPRIV(net_dev);
pr_debug("\n");
if (unlikely(skb == NULL)) {
/* skb==NULL means VCC is being destroyed */
br2684_close_vcc(brvcc);
if (list_empty(&brdev->brvccs)) {
write_lock_irq(&devs_lock);
list_del(&brdev->br2684_devs);
write_unlock_irq(&devs_lock);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
return;
}
skb_debug(skb);
atm_return(atmvcc, skb->truesize);
pr_debug("skb from brdev %p\n", brdev);
if (brvcc->encaps == e_llc) {
if (skb->len > 7 && skb->data[7] == 0x01)
__skb_trim(skb, skb->len - 4);
/* accept packets that have "ipv[46]" in the snap header */
if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
(memcmp(skb->data, llc_oui_ipv4,
sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
if (memcmp(skb->data + 6, ethertype_ipv6,
sizeof(ethertype_ipv6)) == 0)
skb->protocol = htons(ETH_P_IPV6);
else if (memcmp(skb->data + 6, ethertype_ipv4,
sizeof(ethertype_ipv4)) == 0)
skb->protocol = htons(ETH_P_IP);
else
goto error;
skb_pull(skb, sizeof(llc_oui_ipv4));
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
/*
* Let us waste some time checking the encapsulation. Note that only
* 7 chars are checked, so frames with a valid FCS are also accepted
* (but the FCS itself is not checked, of course).
*/
} else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
(memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
skb_pull(skb, sizeof(llc_oui_pid_pad));
skb->protocol = eth_type_trans(skb, net_dev);
} else
goto error;
} else { /* e_vc */
if (brdev->payload == p_routed) {
struct iphdr *iph;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
if (iph->version == 4)
skb->protocol = htons(ETH_P_IP);
else if (iph->version == 6)
skb->protocol = htons(ETH_P_IPV6);
else
goto error;
skb->pkt_type = PACKET_HOST;
} else { /* p_bridged */
/* first 2 chars should be 0 */
if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
goto error;
skb_pull(skb, BR2684_PAD_LEN);
skb->protocol = eth_type_trans(skb, net_dev);
}
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
goto dropped;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
skb->dev = net_dev;
ATM_SKB(skb)->vcc = atmvcc; /* needed ? */
pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol));
skb_debug(skb);
/* sigh, interface is down? */
if (unlikely(!(net_dev->flags & IFF_UP)))
goto dropped;
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
return;
dropped:
net_dev->stats.rx_dropped++;
goto free_skb;
error:
net_dev->stats.rx_errors++;
free_skb:
dev_kfree_skb(skb);
}
/*
* Assign a vcc to a dev
* Note: we do not have explicit unassign, but look at _push()
*/
static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
{
struct br2684_vcc *brvcc;
struct br2684_dev *brdev;
struct net_device *net_dev;
struct atm_backend_br2684 be;
int err;
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
if (!brvcc)
return -ENOMEM;
/*
* Allow two packets in the ATM queue. One actually being sent, and one
* for the ATM 'TX done' handler to send. It shouldn't take long to get
* the next one from the netdev queue, when we need it. More than that
* would be bufferbloat.
*/
atomic_set(&brvcc->qspace, 2);
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
pr_err("tried to attach to non-existent device\n");
err = -ENXIO;
goto error;
}
brdev = BRPRIV(net_dev);
if (atmvcc->push == NULL) {
err = -EBADFD;
goto error;
}
if (!list_empty(&brdev->brvccs)) {
/* Only 1 VCC/dev right now */
err = -EEXIST;
goto error;
}
if (be.fcs_in != BR2684_FCSIN_NO ||
be.fcs_out != BR2684_FCSOUT_NO ||
be.fcs_auto || be.has_vpiid || be.send_padding ||
(be.encaps != BR2684_ENCAPS_VC &&
be.encaps != BR2684_ENCAPS_LLC) ||
be.min_size != 0) {
err = -EINVAL;
goto error;
}
pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
unsigned char *esi = atmvcc->dev->esi;
const u8 one = 1;
if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
dev_addr_set(net_dev, esi);
else
dev_addr_mod(net_dev, 2, &one, 1);
}
list_add(&brvcc->brvccs, &brdev->brvccs);
write_unlock_irq(&devs_lock);
brvcc->device = net_dev;
brvcc->atmvcc = atmvcc;
atmvcc->user_back = brvcc;
brvcc->encaps = (enum br2684_encaps)be.encaps;
brvcc->old_push = atmvcc->push;
brvcc->old_pop = atmvcc->pop;
brvcc->old_release_cb = atmvcc->release_cb;
brvcc->old_owner = atmvcc->owner;
barrier();
atmvcc->push = br2684_push;
atmvcc->pop = br2684_pop;
atmvcc->release_cb = br2684_release_cb;
atmvcc->owner = THIS_MODULE;
/* initialize netdev carrier state */
if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
else
netif_carrier_on(net_dev);
__module_get(THIS_MODULE);
/* re-process everything received between connection setup and
backend setup */
vcc_process_recv_queue(atmvcc);
return 0;
error:
write_unlock_irq(&devs_lock);
kfree(brvcc);
return err;
}
static const struct net_device_ops br2684_netdev_ops = {
.ndo_start_xmit = br2684_start_xmit,
.ndo_set_mac_address = br2684_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static const struct net_device_ops br2684_netdev_ops_routed = {
.ndo_start_xmit = br2684_start_xmit,
.ndo_set_mac_address = br2684_mac_addr,
};
static void br2684_setup(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
ether_setup(netdev);
netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
brdev->net_dev = netdev;
netdev->netdev_ops = &br2684_netdev_ops;
INIT_LIST_HEAD(&brdev->brvccs);
}
static void br2684_setup_routed(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
brdev->net_dev = netdev;
netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
netdev->netdev_ops = &br2684_netdev_ops_routed;
netdev->addr_len = 0;
netdev->mtu = ETH_DATA_LEN;
netdev->min_mtu = 0;
netdev->max_mtu = ETH_MAX_MTU;
netdev->type = ARPHRD_PPP;
netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
netdev->tx_queue_len = 100;
INIT_LIST_HEAD(&brdev->brvccs);
}
static int br2684_create(void __user *arg)
{
int err;
struct net_device *netdev;
struct br2684_dev *brdev;
struct atm_newif_br2684 ni;
enum br2684_payload payload;
pr_debug("\n");
if (copy_from_user(&ni, arg, sizeof ni))
return -EFAULT;
if (ni.media & BR2684_FLAG_ROUTED)
payload = p_routed;
else
payload = p_bridged;
ni.media &= 0xffff; /* strip flags */
if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
return -EINVAL;
netdev = alloc_netdev(sizeof(struct br2684_dev),
ni.ifname[0] ? ni.ifname : "nas%d",
NET_NAME_UNKNOWN,
(payload == p_routed) ? br2684_setup_routed : br2684_setup);
if (!netdev)
return -ENOMEM;
brdev = BRPRIV(netdev);
pr_debug("registered netdev %s\n", netdev->name);
/* open, stop, do_ioctl ? */
err = register_netdev(netdev);
if (err < 0) {
pr_err("register_netdev failed\n");
free_netdev(netdev);
return err;
}
write_lock_irq(&devs_lock);
brdev->payload = payload;
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
brdev->number = 1;
} else
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
list_add_tail(&brdev->br2684_devs, &br2684_devs);
write_unlock_irq(&devs_lock);
return 0;
}
/*
* This handles ioctls actually performed on our vcc - we must return
* -ENOIOCTLCMD for any unrecognized ioctl
*/
static int br2684_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_vcc *atmvcc = ATM_SD(sock);
void __user *argp = (void __user *)arg;
atm_backend_t b;
int err;
switch (cmd) {
case ATM_SETBACKEND:
case ATM_NEWBACKENDIF:
err = get_user(b, (atm_backend_t __user *) argp);
if (err)
return -EFAULT;
if (b != ATM_BACKEND_BR2684)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd == ATM_SETBACKEND) {
if (sock->state != SS_CONNECTED)
return -EINVAL;
return br2684_regvcc(atmvcc, argp);
} else {
return br2684_create(argp);
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
case BR2684_SETFILT:
if (atmvcc->push != br2684_push)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = br2684_setfilt(atmvcc, argp);
return err;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return -ENOIOCTLCMD;
}
static struct atm_ioctl br2684_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = br2684_ioctl,
};
#ifdef CONFIG_PROC_FS
static void *br2684_seq_start(struct seq_file *seq, loff_t * pos)
__acquires(devs_lock)
{
read_lock(&devs_lock);
return seq_list_start(&br2684_devs, *pos);
}
static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t * pos)
{
return seq_list_next(v, &br2684_devs, pos);
}
static void br2684_seq_stop(struct seq_file *seq, void *v)
__releases(devs_lock)
{
read_unlock(&devs_lock);
}
static int br2684_seq_show(struct seq_file *seq, void *v)
{
const struct br2684_dev *brdev = list_entry(v, struct br2684_dev,
br2684_devs);
const struct net_device *net_dev = brdev->net_dev;
const struct br2684_vcc *brvcc;
seq_printf(seq, "dev %.16s: num=%d, mac=%pM (%s)\n",
net_dev->name,
brdev->number,
net_dev->dev_addr,
brdev->mac_was_set ? "set" : "auto");
list_for_each_entry(brvcc, &brdev->brvccs, brvccs) {
seq_printf(seq, " vcc %d.%d.%d: encaps=%s payload=%s"
", failed copies %u/%u"
"\n", brvcc->atmvcc->dev->number,
brvcc->atmvcc->vpi, brvcc->atmvcc->vci,
(brvcc->encaps == e_llc) ? "LLC" : "VC",
(brdev->payload == p_bridged) ? "bridged" : "routed",
brvcc->copies_failed, brvcc->copies_needed);
#ifdef CONFIG_ATM_BR2684_IPFILTER
if (brvcc->filter.netmask != 0)
seq_printf(seq, " filter=%pI4/%pI4\n",
&brvcc->filter.prefix,
&brvcc->filter.netmask);
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return 0;
}
static const struct seq_operations br2684_seq_ops = {
.start = br2684_seq_start,
.next = br2684_seq_next,
.stop = br2684_seq_stop,
.show = br2684_seq_show,
};
extern struct proc_dir_entry *atm_proc_root; /* from proc.c */
#endif /* CONFIG_PROC_FS */
static int __init br2684_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *p;
p = proc_create_seq("br2684", 0, atm_proc_root, &br2684_seq_ops);
if (p == NULL)
return -ENOMEM;
#endif
register_atm_ioctl(&br2684_ioctl_ops);
register_atmdevice_notifier(&atm_dev_notifier);
return 0;
}
static void __exit br2684_exit(void)
{
struct net_device *net_dev;
struct br2684_dev *brdev;
struct br2684_vcc *brvcc;
deregister_atm_ioctl(&br2684_ioctl_ops);
#ifdef CONFIG_PROC_FS
remove_proc_entry("br2684", atm_proc_root);
#endif
unregister_atmdevice_notifier(&atm_dev_notifier);
while (!list_empty(&br2684_devs)) {
net_dev = list_entry_brdev(br2684_devs.next);
brdev = BRPRIV(net_dev);
while (!list_empty(&brdev->brvccs)) {
brvcc = list_entry_brvcc(brdev->brvccs.next);
br2684_close_vcc(brvcc);
}
list_del(&brdev->br2684_devs);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
}
module_init(br2684_init);
module_exit(br2684_exit);
MODULE_AUTHOR("Marcell GAL");
MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5");
MODULE_LICENSE("GPL");
| linux-master | net/atm/br2684.c |
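For orientation only: the ATM_NEWBACKENDIF / ATM_SETBACKEND handling in br2684_ioctl() above is normally driven from userspace by a helper such as br2684ctl. The sketch below is a hypothetical, unverified illustration: the struct layouts are assumed to match linux/atmbr2684.h, only the fields that br2684_create() and br2684_regvcc() actually check are filled in, and the interface name "nas0" and the helper name attach_br2684() are invented.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/atmdev.h>
#include <linux/atmbr2684.h>

/* Illustrative only: attach a connected AAL5 socket to the br2684 backend. */
static int attach_br2684(int atm_fd)
{
	struct atm_newif_br2684 ni;
	struct atm_backend_br2684 be;

	memset(&ni, 0, sizeof(ni));
	ni.backend_num = ATM_BACKEND_BR2684;	/* read by br2684_ioctl() before dispatch */
	ni.media = BR2684_MEDIA_ETHERNET;	/* br2684_create() rejects anything else */
	ni.mtu = 1500;				/* ... and any other MTU */
	strcpy(ni.ifname, "nas0");
	if (ioctl(atm_fd, ATM_NEWBACKENDIF, &ni) < 0)
		return -1;			/* would have created the nas0 net_device */

	memset(&be, 0, sizeof(be));
	be.backend_num = ATM_BACKEND_BR2684;
	be.encaps = BR2684_ENCAPS_LLC;		/* or BR2684_ENCAPS_VC */
	be.min_size = 0;			/* br2684_regvcc() insists on 0 */
	be.ifspec.method = BR2684_FIND_BYIFNAME;
	strcpy(be.ifspec.spec.ifname, "nas0");
	return ioctl(atm_fd, ATM_SETBACKEND, &be);
}

Note that ATM_SETBACKEND is rejected with -EINVAL unless the socket is already in SS_CONNECTED state, so atm_fd is assumed to be an AAL5 socket that has been connected to the PVC or SVC meant to carry the bridged traffic.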
// SPDX-License-Identifier: GPL-2.0
/* net/atm/svc.c - ATM SVC sockets */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/string.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/fcntl.h> /* O_NONBLOCK */
#include <linux/init.h>
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmsap.h>
#include <linux/atmsvc.h>
#include <linux/atmdev.h>
#include <linux/bitops.h>
#include <net/sock.h> /* for sock_no_* */
#include <linux/uaccess.h>
#include <linux/export.h>
#include "resources.h"
#include "common.h" /* common for PVCs and SVCs */
#include "signaling.h"
#include "addr.h"
#ifdef CONFIG_COMPAT
/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL + 4, struct compat_atm_iobuf)
#endif
static int svc_create(struct net *net, struct socket *sock, int protocol,
int kern);
/*
* Note: since all this is still nicely synchronized with the signaling daemon,
* there's no need to protect sleep loops with clis. If signaling is
* moved into the kernel, that would change.
*/
static int svc_shutdown(struct socket *sock, int how)
{
return 0;
}
static void svc_disconnect(struct atm_vcc *vcc)
{
DEFINE_WAIT(wait);
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
pr_debug("%p\n", vcc);
if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
sigd_enq(vcc, as_close, NULL, NULL, NULL);
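/* Wait until atmsigd acknowledges the release (ATM_VF_RELEASED) or goes away. */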
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
pr_debug("LISTEN REL\n");
sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
dev_kfree_skb(skb);
}
clear_bit(ATM_VF_REGIS, &vcc->flags);
/* ... may retry later */
}
static int svc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
if (sk) {
vcc = ATM_SD(sock);
pr_debug("%p\n", vcc);
clear_bit(ATM_VF_READY, &vcc->flags);
/*
* VCC pointer is used as a reference,
* so we must not free it (thereby subjecting it to re-use)
* before all pending connections are closed
*/
svc_disconnect(vcc);
vcc_release(sock);
}
return 0;
}
static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct sockaddr_atmsvc *addr;
struct atm_vcc *vcc;
int error;
if (sockaddr_len != sizeof(struct sockaddr_atmsvc))
return -EINVAL;
lock_sock(sk);
if (sock->state == SS_CONNECTED) {
error = -EISCONN;
goto out;
}
if (sock->state != SS_UNCONNECTED) {
error = -EINVAL;
goto out;
}
vcc = ATM_SD(sock);
addr = (struct sockaddr_atmsvc *) sockaddr;
if (addr->sas_family != AF_ATMSVC) {
error = -EAFNOSUPPORT;
goto out;
}
clear_bit(ATM_VF_BOUND, &vcc->flags);
/* failing rebind will kill old binding */
/* @@@ check memory (de)allocation on rebind */
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
vcc->local = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
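/* Wait for atmsigd to answer the as_bind request; its reply clears ATM_VF_WAITING. */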
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!sk->sk_err)
set_bit(ATM_VF_BOUND, &vcc->flags);
error = -sk->sk_err;
out:
release_sock(sk);
return error;
}
static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct sockaddr_atmsvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("%p\n", vcc);
lock_sock(sk);
if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
error = -EINVAL;
goto out;
}
switch (sock->state) {
default:
error = -EINVAL;
goto out;
case SS_CONNECTED:
error = -EISCONN;
goto out;
case SS_CONNECTING:
if (test_bit(ATM_VF_WAITING, &vcc->flags)) {
error = -EALREADY;
goto out;
}
sock->state = SS_UNCONNECTED;
if (sk->sk_err) {
error = -sk->sk_err;
goto out;
}
break;
case SS_UNCONNECTED:
addr = (struct sockaddr_atmsvc *) sockaddr;
if (addr->sas_family != AF_ATMSVC) {
error = -EAFNOSUPPORT;
goto out;
}
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) {
error = -EINVAL;
goto out;
}
if (!vcc->qos.txtp.traffic_class &&
!vcc->qos.rxtp.traffic_class) {
error = -EINVAL;
goto out;
}
vcc->remote = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
if (flags & O_NONBLOCK) {
sock->state = SS_CONNECTING;
error = -EINPROGRESS;
goto out;
}
error = 0;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
if (!signal_pending(current)) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
continue;
}
pr_debug("*ABORT*\n");
/*
* This is tricky:
* Kernel ---close--> Demon
* Kernel <--close--- Demon
* or
* Kernel ---close--> Demon
* Kernel <--error--- Demon
* or
* Kernel ---close--> Demon
* Kernel <--okay---- Demon
* Kernel <--close--- Demon
*/
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
if (!sk->sk_err)
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
sigd) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
clear_bit(ATM_VF_REGIS, &vcc->flags);
clear_bit(ATM_VF_RELEASED, &vcc->flags);
clear_bit(ATM_VF_CLOSE, &vcc->flags);
/* we're gone now but may connect later */
error = -EINTR;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (sk->sk_err) {
error = -sk->sk_err;
goto out;
}
}
vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
vcc->qos.txtp.pcr = 0;
vcc->qos.txtp.min_pcr = 0;
error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
if (!error)
sock->state = SS_CONNECTED;
else
(void)svc_disconnect(vcc);
out:
release_sock(sk);
return error;
}
static int svc_listen(struct socket *sock, int backlog)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("%p\n", vcc);
lock_sock(sk);
/* let server handle listen on unbound sockets */
if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
error = -EINVAL;
goto out;
}
if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
error = -EADDRINUSE;
goto out;
}
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
set_bit(ATM_VF_LISTEN, &vcc->flags);
vcc_insert_socket(sk);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err;
out:
release_sock(sk);
return error;
}
static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct atmsvc_msg *msg;
struct atm_vcc *old_vcc = ATM_SD(sock);
struct atm_vcc *new_vcc;
int error;
lock_sock(sk);
error = svc_create(sock_net(sk), newsock, 0, kern);
if (error)
goto out;
new_vcc = ATM_SD(newsock);
pr_debug("%p -> %p\n", old_vcc, new_vcc);
while (1) {
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sigd) {
if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
break;
if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
error = -sk->sk_err;
break;
}
if (flags & O_NONBLOCK) {
error = -EAGAIN;
break;
}
release_sock(sk);
schedule();
lock_sock(sk);
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!skb) {
error = -EUNATCH;
goto out;
}
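/* Each skb queued here by sigd_send() carries an as_indicate message describing an incoming call. */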
msg = (struct atmsvc_msg *)skb->data;
new_vcc->qos = msg->qos;
set_bit(ATM_VF_HASQOS, &new_vcc->flags);
new_vcc->remote = msg->svc;
new_vcc->local = msg->local;
new_vcc->sap = msg->sap;
error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi,
msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
sk_acceptq_removed(sk);
if (error) {
sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
&old_vcc->qos, error);
error = error == -EAGAIN ? -EBUSY : error;
goto out;
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
for (;;) {
prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &new_vcc->flags) || !sigd)
break;
release_sock(sk);
schedule();
lock_sock(sk);
}
finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!sk_atm(new_vcc)->sk_err)
break;
if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) {
error = -sk_atm(new_vcc)->sk_err;
goto out;
}
}
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return error;
}
static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
int peer)
{
struct sockaddr_atmsvc *addr;
addr = (struct sockaddr_atmsvc *) sockaddr;
memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
sizeof(struct sockaddr_atmsvc));
return sizeof(struct sockaddr_atmsvc);
}
int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
struct sock *sk = sk_atm(vcc);
DEFINE_WAIT(wait);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) ||
test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) {
break;
}
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd)
return -EUNATCH;
return -sk->sk_err;
}
static int svc_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int value, error = 0;
lock_sock(sk);
switch (optname) {
case SO_ATMSAP:
if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
error = -EINVAL;
goto out;
}
if (copy_from_sockptr(&vcc->sap, optval, optlen)) {
error = -EFAULT;
goto out;
}
set_bit(ATM_VF_HASSAP, &vcc->flags);
break;
case SO_MULTIPOINT:
if (level != SOL_ATM || optlen != sizeof(int)) {
error = -EINVAL;
goto out;
}
if (copy_from_sockptr(&value, optval, sizeof(int))) {
error = -EFAULT;
goto out;
}
if (value == 1)
set_bit(ATM_VF_SESSION, &vcc->flags);
else if (value == 0)
clear_bit(ATM_VF_SESSION, &vcc->flags);
else
error = -EINVAL;
break;
default:
error = vcc_setsockopt(sock, level, optname, optval, optlen);
}
out:
release_sock(sk);
return error;
}
static int svc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error = 0, len;
lock_sock(sk);
if (!__SO_LEVEL_MATCH(optname, level) || optname != SO_ATMSAP) {
error = vcc_getsockopt(sock, level, optname, optval, optlen);
goto out;
}
if (get_user(len, optlen)) {
error = -EFAULT;
goto out;
}
if (len != sizeof(struct atm_sap)) {
error = -EINVAL;
goto out;
}
if (copy_to_user(optval, &ATM_SD(sock)->sap, sizeof(struct atm_sap))) {
error = -EFAULT;
goto out;
}
out:
release_sock(sk);
return error;
}
static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_addparty, NULL, NULL,
(struct sockaddr_atmsvc *) sockaddr);
if (flags & O_NONBLOCK) {
error = -EINPROGRESS;
goto out;
}
pr_debug("added wait queue\n");
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
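/* sigd_send() stored -reply in sk_err_soft: this yields a negative errno on failure, otherwise the endpoint reference. */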
error = -xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
}
static int svc_dropparty(struct socket *sock, int ep_ref)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
error = -xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
}
static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int error, ep_ref;
struct sockaddr_atmsvc sa;
struct atm_vcc *vcc = ATM_SD(sock);
switch (cmd) {
case ATM_ADDPARTY:
if (!test_bit(ATM_VF_SESSION, &vcc->flags))
return -EINVAL;
if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
return -EFAULT;
error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
0);
break;
case ATM_DROPPARTY:
if (!test_bit(ATM_VF_SESSION, &vcc->flags))
return -EINVAL;
if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
return -EFAULT;
error = svc_dropparty(sock, ep_ref);
break;
default:
error = vcc_ioctl(sock, cmd, arg);
}
return error;
}
#ifdef CONFIG_COMPAT
static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
But actually it takes a struct sockaddr_atmsvc, which doesn't need
compat handling. So all we have to do is fix up cmd... */
if (cmd == COMPAT_ATM_ADDPARTY)
cmd = ATM_ADDPARTY;
if (cmd == ATM_ADDPARTY || cmd == ATM_DROPPARTY)
return svc_ioctl(sock, cmd, arg);
else
return vcc_compat_ioctl(sock, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct proto_ops svc_proto_ops = {
.family = PF_ATMSVC,
.owner = THIS_MODULE,
.release = svc_release,
.bind = svc_bind,
.connect = svc_connect,
.socketpair = sock_no_socketpair,
.accept = svc_accept,
.getname = svc_getname,
.poll = vcc_poll,
.ioctl = svc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = svc_compat_ioctl,
#endif
.gettstamp = sock_gettstamp,
.listen = svc_listen,
.shutdown = svc_shutdown,
.setsockopt = svc_setsockopt,
.getsockopt = svc_getsockopt,
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
};
static int svc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
int error;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
sock->ops = &svc_proto_ops;
error = vcc_create(net, sock, protocol, AF_ATMSVC, kern);
if (error)
return error;
ATM_SD(sock)->local.sas_family = AF_ATMSVC;
ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
return 0;
}
static const struct net_proto_family svc_family_ops = {
.family = PF_ATMSVC,
.create = svc_create,
.owner = THIS_MODULE,
};
/*
* Initialize the ATM SVC protocol family
*/
int __init atmsvc_init(void)
{
return sock_register(&svc_family_ops);
}
void atmsvc_exit(void)
{
sock_unregister(PF_ATMSVC);
}
| linux-master | net/atm/svc.c |
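For orientation only: the checks in svc_bind() and svc_connect() above (the address family must be AF_ATMSVC, a QoS must have been set so that ATM_VF_HASQOS is present, and neither traffic class may be ATM_ANYCLASS) correspond to a userspace sequence roughly like the sketch below. This is an assumption-laden illustration in the style of the linux-atm tools, not taken from any real source; open_svc() and the placeholder NSAP address are invented.

#include <string.h>
#include <sys/socket.h>
#include <linux/atm.h>

/* Illustrative only: open an SVC socket and connect it to a remote NSAP. */
static int open_svc(const unsigned char nsap[ATM_ESA_LEN])
{
	struct sockaddr_atmsvc addr;
	struct atm_qos qos;
	int s = socket(PF_ATMSVC, SOCK_DGRAM, 0);

	if (s < 0)
		return -1;

	/* Without a QoS, svc_connect() bails out with -EBADFD. */
	memset(&qos, 0, sizeof(qos));
	qos.aal = ATM_AAL5;
	qos.txtp.traffic_class = ATM_UBR;
	qos.txtp.max_sdu = 1524;
	qos.rxtp = qos.txtp;
	if (setsockopt(s, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos)) < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sas_family = AF_ATMSVC;		/* anything else gets -EAFNOSUPPORT */
	memcpy(addr.sas_addr.prv, nsap, ATM_ESA_LEN);
	return connect(s, (struct sockaddr *)&addr, sizeof(addr));
}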
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
/* We are an ethernet device */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h> /* for ip_fast_csum() */
#include <net/arp.h>
#include <net/dst.h>
#include <linux/proc_fs.h>
/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>
#include <linux/atmmpc.h>
/* Modular too */
#include <linux/module.h>
#include "lec.h"
#include "mpc.h"
#include "resources.h"
/*
* mpc.c: Implementation of MPOA client kernel part
*/
#if 0
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
} while (0)
#define dprintk_cont(format, args...) \
do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
} while (0)
#define ddprintk_cont(format, args...) \
do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
/* mpc_daemon -> kernel */
static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
int action);
static void MPOA_cache_impos_rcvd(struct k_message *msg,
struct mpoa_client *mpc);
static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
struct mpoa_client *mpc);
static void set_mps_mac_addr_rcvd(struct k_message *mesg,
struct mpoa_client *mpc);
static const uint8_t *copy_macs(struct mpoa_client *mpc,
const uint8_t *router_mac,
const uint8_t *tlvs, uint8_t mps_macs,
uint8_t device_type);
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc);
static void mpoad_close(struct atm_vcc *vcc);
static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
struct net_device *dev);
static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
unsigned long event, void *dev);
static void mpc_timer_refresh(void);
static void mpc_cache_check(struct timer_list *unused);
static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
0xaa, 0xaa, 0x03,
{0x00, 0x00, 0x5e},
{0x00, 0x03} /* For MPOA control PDUs */
};
static struct llc_snap_hdr llc_snap_mpoa_data = {
0xaa, 0xaa, 0x03,
{0x00, 0x00, 0x00},
{0x08, 0x00} /* This is for IP PDUs only */
};
static struct llc_snap_hdr llc_snap_mpoa_data_tagged = {
0xaa, 0xaa, 0x03,
{0x00, 0x00, 0x00},
{0x88, 0x4c} /* This is for tagged data PDUs */
};
static struct notifier_block mpoa_notifier = {
mpoa_event_listener,
NULL,
0
};
struct mpoa_client *mpcs = NULL; /* FIXME */
static struct atm_mpoa_qos *qos_head = NULL;
static DEFINE_TIMER(mpc_timer, mpc_cache_check);
static struct mpoa_client *find_mpc_by_itfnum(int itf)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->dev_num == itf)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
static struct mpoa_client *find_mpc_by_vcc(struct atm_vcc *vcc)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->mpoad_vcc == vcc)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
static struct mpoa_client *find_mpc_by_lec(struct net_device *dev)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->dev == dev)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
/*
* Functions for managing QoS list
*/
/*
* Overwrites the old entry or makes a new one.
*/
struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
{
struct atm_mpoa_qos *entry;
entry = atm_mpoa_search_qos(dst_ip);
if (entry != NULL) {
entry->qos = *qos;
return entry;
}
entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
if (entry == NULL) {
pr_info("mpoa: out of memory\n");
return entry;
}
entry->ipaddr = dst_ip;
entry->qos = *qos;
entry->next = qos_head;
qos_head = entry;
return entry;
}
struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
{
struct atm_mpoa_qos *qos;
qos = qos_head;
while (qos) {
if (qos->ipaddr == dst_ip)
break;
qos = qos->next;
}
return qos;
}
/*
* Returns 0 for failure
*/
int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
{
struct atm_mpoa_qos *curr;
if (entry == NULL)
return 0;
if (entry == qos_head) {
qos_head = qos_head->next;
kfree(entry);
return 1;
}
curr = qos_head;
while (curr != NULL) {
if (curr->next == entry) {
curr->next = entry->next;
kfree(entry);
return 1;
}
curr = curr->next;
}
return 0;
}
/* this is buggered - we need locking for qos_head */
void atm_mpoa_disp_qos(struct seq_file *m)
{
struct atm_mpoa_qos *qos;
qos = qos_head;
seq_printf(m, "QoS entries for shortcuts:\n");
seq_printf(m, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
while (qos != NULL) {
seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
&qos->ipaddr,
qos->qos.txtp.max_pcr,
qos->qos.txtp.pcr,
qos->qos.txtp.min_pcr,
qos->qos.txtp.max_cdv,
qos->qos.txtp.max_sdu,
qos->qos.rxtp.max_pcr,
qos->qos.rxtp.pcr,
qos->qos.rxtp.min_pcr,
qos->qos.rxtp.max_cdv,
qos->qos.rxtp.max_sdu);
qos = qos->next;
}
}
static struct net_device *find_lec_by_itfnum(int itf)
{
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "lec%d", itf);
dev = dev_get_by_name(&init_net, name);
return dev;
}
static struct mpoa_client *alloc_mpc(void)
{
struct mpoa_client *mpc;
mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
if (mpc == NULL)
return NULL;
rwlock_init(&mpc->ingress_lock);
rwlock_init(&mpc->egress_lock);
mpc->next = mpcs;
atm_mpoa_init_cache(mpc);
mpc->parameters.mpc_p1 = MPC_P1;
mpc->parameters.mpc_p2 = MPC_P2;
memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
mpc->parameters.mpc_p4 = MPC_P4;
mpc->parameters.mpc_p5 = MPC_P5;
mpc->parameters.mpc_p6 = MPC_P6;
mpcs = mpc;
return mpc;
}
/*
*
* start_mpc() puts the MPC on line. All the packets destined
* to the lec underneath us are now being monitored and
* shortcuts will be established.
*
*/
static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
{
dprintk("(%s)\n", mpc->dev->name);
if (!dev->netdev_ops)
pr_info("(%s) not starting\n", dev->name);
else {
mpc->old_ops = dev->netdev_ops;
mpc->new_ops = *mpc->old_ops;
mpc->new_ops.ndo_start_xmit = mpc_send_packet;
dev->netdev_ops = &mpc->new_ops;
}
}
static void stop_mpc(struct mpoa_client *mpc)
{
struct net_device *dev = mpc->dev;
dprintk("(%s)", mpc->dev->name);
/* Let's not restore a stale netdev_ops if ours are not the ones currently installed */
if (dev->netdev_ops != &mpc->new_ops) {
dprintk_cont(" mpc already stopped, not fatal\n");
return;
}
dprintk_cont("\n");
dev->netdev_ops = mpc->old_ops;
mpc->old_ops = NULL;
/* close_shortcuts(mpc); ??? FIXME */
}
static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
static const char *mpoa_device_type_string(char type)
{
switch (type) {
case NON_MPOA:
return "non-MPOA device";
case MPS:
return "MPS";
case MPC:
return "MPC";
case MPS_AND_MPC:
return "both MPS and MPC";
}
return "unspecified (non-MPOA) device";
}
/*
* lec device calls this via its netdev_priv(dev)->lane2_ops
* ->associate_indicator() when it sees a TLV in LE_ARP packet.
* We fill in the pointer above when we see a LANE2 lec initializing
* See LANE2 spec 3.1.5
*
* Quite a big and ugly function but when you look at it
* all it does is to try to locate and parse MPOA Device
* Type TLV.
* We give our lec a pointer to this function and when the
* lec sees a TLV it uses the pointer to call this function.
*
*/
static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
const u8 *tlvs, u32 sizeoftlvs)
{
uint32_t type;
uint8_t length, mpoa_device_type, number_of_mps_macs;
const uint8_t *end_of_tlvs;
struct mpoa_client *mpc;
mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
dprintk("(%s) received TLV(s), ", dev->name);
dprintk("total length of all TLVs %d\n", sizeoftlvs);
mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
if (mpc == NULL) {
pr_info("(%s) no mpc\n", dev->name);
return;
}
end_of_tlvs = tlvs + sizeoftlvs;
while (end_of_tlvs - tlvs >= 5) {
type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
(tlvs[2] << 8) | tlvs[3]);
length = tlvs[4];
tlvs += 5;
dprintk(" type 0x%x length %02x\n", type, length);
if (tlvs + length > end_of_tlvs) {
pr_info("TLV value extends past its buffer, aborting parse\n");
return;
}
if (type == 0) {
pr_info("mpoa: (%s) TLV type was 0, returning\n",
dev->name);
return;
}
if (type != TLV_MPOA_DEVICE_TYPE) {
tlvs += length;
continue; /* skip other TLVs */
}
mpoa_device_type = *tlvs++;
number_of_mps_macs = *tlvs++;
dprintk("(%s) MPOA device type '%s', ",
dev->name, mpoa_device_type_string(mpoa_device_type));
if (mpoa_device_type == MPS_AND_MPC &&
length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
pr_info("(%s) short MPOA Device Type TLV\n",
dev->name);
continue;
}
if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
length < 22 + number_of_mps_macs*ETH_ALEN) {
pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
continue;
}
if (mpoa_device_type != MPS &&
mpoa_device_type != MPS_AND_MPC) {
dprintk("ignoring non-MPS device ");
if (mpoa_device_type == MPC)
tlvs += 20;
continue; /* we are only interested in MPSs */
}
if (number_of_mps_macs == 0 &&
mpoa_device_type == MPS_AND_MPC) {
pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
continue; /* someone should read the spec */
}
dprintk_cont("this MPS has %d MAC addresses\n",
number_of_mps_macs);
/*
* ok, now we can go and tell our daemon
* the control address of MPS
*/
send_set_mps_ctrl_addr(tlvs, mpc);
tlvs = copy_macs(mpc, mac_addr, tlvs,
number_of_mps_macs, mpoa_device_type);
if (tlvs == NULL)
return;
}
if (end_of_tlvs - tlvs != 0)
pr_info("(%s) ignoring %zd bytes of trailing TLV garbage\n",
dev->name, end_of_tlvs - tlvs);
}
/*
* Store at least the advertising router's MAC address
* plus the possible MAC address(es) to mpc->mps_macs.
* For a freshly allocated MPOA client mpc->mps_macs == 0.
*/
static const uint8_t *copy_macs(struct mpoa_client *mpc,
const uint8_t *router_mac,
const uint8_t *tlvs, uint8_t mps_macs,
uint8_t device_type)
{
int num_macs;
num_macs = (mps_macs > 1) ? mps_macs : 1;
if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
if (mpc->number_of_mps_macs != 0)
kfree(mpc->mps_macs);
mpc->number_of_mps_macs = 0;
mpc->mps_macs = kmalloc_array(ETH_ALEN, num_macs, GFP_KERNEL);
if (mpc->mps_macs == NULL) {
pr_info("(%s) out of mem\n", mpc->dev->name);
return NULL;
}
}
ether_addr_copy(mpc->mps_macs, router_mac);
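/* Skip the MPS control ATM address (ATM_ESA_LEN bytes), plus the MPC one for a combined MPS_AND_MPC TLV. */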
tlvs += 20;
if (device_type == MPS_AND_MPC)
tlvs += 20;
if (mps_macs > 0)
memcpy(mpc->mps_macs, tlvs, mps_macs*ETH_ALEN);
tlvs += mps_macs*ETH_ALEN;
mpc->number_of_mps_macs = num_macs;
return tlvs;
}
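/* Returns 0 if the skb was sent over an MPOA shortcut, non-zero if the caller should fall back to the default LANE transmit path. */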
static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
{
in_cache_entry *entry;
struct iphdr *iph;
char *buff;
__be32 ipaddr = 0;
static struct {
struct llc_snap_hdr hdr;
__be32 tag;
} tagged_llc_snap_hdr = {
{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
0
};
buff = skb->data + mpc->dev->hard_header_len;
iph = (struct iphdr *)buff;
ipaddr = iph->daddr;
ddprintk("(%s) ipaddr 0x%x\n",
mpc->dev->name, ipaddr);
entry = mpc->in_ops->get(ipaddr, mpc);
if (entry == NULL) {
entry = mpc->in_ops->add_entry(ipaddr, mpc);
if (entry != NULL)
mpc->in_ops->put(entry);
return 1;
}
/* threshold not exceeded or VCC not ready */
if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
ddprintk("(%s) cache_hit: returns != OPEN\n",
mpc->dev->name);
mpc->in_ops->put(entry);
return 1;
}
ddprintk("(%s) using shortcut\n",
mpc->dev->name);
/* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
if (iph->ttl <= 1) {
ddprintk("(%s) IP ttl = %u, using LANE\n",
mpc->dev->name, iph->ttl);
mpc->in_ops->put(entry);
return 1;
}
iph->ttl--;
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
if (entry->ctrl_info.tag != 0) {
ddprintk("(%s) adding tag 0x%x\n",
mpc->dev->name, entry->ctrl_info.tag);
tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
skb_push(skb, sizeof(tagged_llc_snap_hdr));
/* add LLC/SNAP header */
skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
sizeof(tagged_llc_snap_hdr));
} else {
skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
skb_push(skb, sizeof(struct llc_snap_hdr));
/* add LLC/SNAP header + tag */
skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
sizeof(struct llc_snap_hdr));
}
atm_account_tx(entry->shortcut, skb);
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
mpc->in_ops->put(entry);
return 0;
}
/*
* Probably needs some error checks and locking, not sure...
*/
static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct mpoa_client *mpc;
struct ethhdr *eth;
int i = 0;
mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
if (mpc == NULL) {
pr_info("(%s) no MPC found\n", dev->name);
goto non_ip;
}
eth = (struct ethhdr *)skb->data;
if (eth->h_proto != htons(ETH_P_IP))
goto non_ip; /* Multi-Protocol Over ATM :-) */
/* Weed out funny packets (e.g., AF_PACKET or raw). */
if (skb->len < ETH_HLEN + sizeof(struct iphdr))
goto non_ip;
skb_set_network_header(skb, ETH_HLEN);
if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
goto non_ip;
while (i < mpc->number_of_mps_macs) {
if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN))
if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
return NETDEV_TX_OK;
i++;
}
non_ip:
return __netdev_start_xmit(mpc->old_ops, skb, dev, false);
}
static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
{
int bytes_left;
struct mpoa_client *mpc;
struct atmmpc_ioc ioc_data;
in_cache_entry *in_entry;
__be32 ipaddr;
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
if (bytes_left != 0) {
pr_info("mpoa:Short read (missed %d bytes) from userland\n",
bytes_left);
return -EFAULT;
}
ipaddr = ioc_data.ipaddr;
if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
return -EINVAL;
mpc = find_mpc_by_itfnum(ioc_data.dev_num);
if (mpc == NULL)
return -EINVAL;
if (ioc_data.type == MPC_SOCKET_INGRESS) {
in_entry = mpc->in_ops->get(ipaddr, mpc);
if (in_entry == NULL ||
in_entry->entry_state < INGRESS_RESOLVED) {
pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
mpc->dev->name);
if (in_entry != NULL)
mpc->in_ops->put(in_entry);
return -EINVAL;
}
pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
in_entry->shortcut = vcc;
mpc->in_ops->put(in_entry);
} else {
pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
}
vcc->proto_data = mpc->dev;
vcc->push = mpc_push;
return 0;
}
/*
*
*/
static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
{
struct mpoa_client *mpc;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
mpc = find_mpc_by_lec(dev);
if (mpc == NULL) {
pr_info("(%s) close for unknown MPC\n", dev->name);
return;
}
dprintk("(%s)\n", dev->name);
in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
if (in_entry) {
dprintk("(%s) ingress SVC closed ip = %pI4\n",
mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
in_entry->shortcut = NULL;
mpc->in_ops->put(in_entry);
}
eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
if (eg_entry) {
dprintk("(%s) egress SVC closed\n", mpc->dev->name);
eg_entry->shortcut = NULL;
mpc->eg_ops->put(eg_entry);
}
if (in_entry == NULL && eg_entry == NULL)
dprintk("(%s) unused vcc closed\n", dev->name);
}
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct net_device *dev = (struct net_device *)vcc->proto_data;
struct sk_buff *new_skb;
eg_cache_entry *eg;
struct mpoa_client *mpc;
__be32 tag;
char *tmp;
ddprintk("(%s)\n", dev->name);
if (skb == NULL) {
dprintk("(%s) null skb, closing VCC\n", dev->name);
mpc_vcc_close(vcc, dev);
return;
}
skb->dev = dev;
if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
sizeof(struct llc_snap_hdr)) == 0) {
struct sock *sk = sk_atm(vcc);
dprintk("(%s) control packet arrived\n", dev->name);
/* Pass control packets to daemon */
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return;
}
/* data coming over the shortcut */
atm_return(vcc, skb->truesize);
mpc = find_mpc_by_lec(dev);
if (mpc == NULL) {
pr_info("(%s) unknown MPC\n", dev->name);
return;
}
if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
ddprintk("(%s) tagged data packet arrived\n", dev->name);
} else if (memcmp(skb->data, &llc_snap_mpoa_data,
sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
dev->name);
dev_kfree_skb_any(skb);
return;
} else {
pr_info("(%s) garbage arrived, purging\n", dev->name);
dev_kfree_skb_any(skb);
return;
}
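/* The 4-byte tag following the LLC/SNAP header identifies the egress cache entry this shortcut traffic belongs to. */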
tmp = skb->data + sizeof(struct llc_snap_hdr);
tag = *(__be32 *)tmp;
eg = mpc->eg_ops->get_by_tag(tag, mpc);
if (eg == NULL) {
pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
dev->name, tag);
purge_egress_shortcut(vcc, NULL);
dev_kfree_skb_any(skb);
return;
}
/*
* See if ingress MPC is using shortcut we opened as a return channel.
* This means we have a bi-directional vcc opened by us.
*/
if (eg->shortcut == NULL) {
eg->shortcut = vcc;
pr_info("(%s) egress SVC in use\n", dev->name);
}
skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
/* get rid of LLC/SNAP header */
new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
/* LLC/SNAP is shorter than MAC header :( */
dev_kfree_skb_any(skb);
if (new_skb == NULL) {
mpc->eg_ops->put(eg);
return;
}
skb_push(new_skb, eg->ctrl_info.DH_length); /* add MAC header */
skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header,
eg->ctrl_info.DH_length);
new_skb->protocol = eth_type_trans(new_skb, dev);
skb_reset_network_header(new_skb);
eg->latest_ip_addr = ip_hdr(new_skb)->saddr;
eg->packets_rcvd++;
mpc->eg_ops->put(eg);
memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
netif_rx(new_skb);
}
static const struct atmdev_ops mpc_ops = { /* only send is required */
.close = mpoad_close,
.send = msg_from_mpoad
};
static struct atm_dev mpc_dev = {
.ops = &mpc_ops,
.type = "mpc",
.number = 42,
.lock = __SPIN_LOCK_UNLOCKED(mpc_dev.lock)
/* members not explicitly initialised will be 0 */
};
static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
{
struct mpoa_client *mpc;
struct lec_priv *priv;
int err;
if (mpcs == NULL) {
mpc_timer_refresh();
/* This lets us know how our LECs are doing */
err = register_netdevice_notifier(&mpoa_notifier);
if (err < 0) {
del_timer(&mpc_timer);
return err;
}
}
mpc = find_mpc_by_itfnum(arg);
if (mpc == NULL) {
dprintk("allocating new mpc for itf %d\n", arg);
mpc = alloc_mpc();
if (mpc == NULL)
return -ENOMEM;
mpc->dev_num = arg;
mpc->dev = find_lec_by_itfnum(arg);
/* NULL if there was no lec */
}
if (mpc->mpoad_vcc) {
pr_info("mpoad is already present for itf %d\n", arg);
return -EADDRINUSE;
}
if (mpc->dev) { /* check if the lec is LANE2 capable */
priv = netdev_priv(mpc->dev);
if (priv->lane_version < 2) {
dev_put(mpc->dev);
mpc->dev = NULL;
} else
priv->lane2_ops->associate_indicator = lane2_assoc_ind;
}
mpc->mpoad_vcc = vcc;
vcc->dev = &mpc_dev;
vcc_insert_socket(sk_atm(vcc));
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
if (mpc->dev) {
char empty[ATM_ESA_LEN];
memset(empty, 0, ATM_ESA_LEN);
start_mpc(mpc, mpc->dev);
/* Set the address in case mpcd e.g. gets killed and restarted;
* if we do not do it now we have to wait for the next LE_ARP.
*/
if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
}
__module_get(THIS_MODULE);
return arg;
}
static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
{
struct k_message mesg;
memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
mesg.type = SET_MPS_CTRL_ADDR;
memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
msg_to_mpoad(&mesg, mpc);
}
static void mpoad_close(struct atm_vcc *vcc)
{
struct mpoa_client *mpc;
struct sk_buff *skb;
mpc = find_mpc_by_vcc(vcc);
if (mpc == NULL) {
pr_info("did not find MPC\n");
return;
}
if (!mpc->mpoad_vcc) {
pr_info("close for non-present mpoad\n");
return;
}
mpc->mpoad_vcc = NULL;
if (mpc->dev) {
struct lec_priv *priv = netdev_priv(mpc->dev);
priv->lane2_ops->associate_indicator = NULL;
stop_mpc(mpc);
dev_put(mpc->dev);
}
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
pr_info("(%s) going down\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
module_put(THIS_MODULE);
}
/*
*
*/
static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
struct k_message *mesg = (struct k_message *)skb->data;
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
if (mpc == NULL) {
pr_info("no mpc found\n");
return 0;
}
dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
switch (mesg->type) {
case MPOA_RES_REPLY_RCVD:
dprintk_cont("mpoa_res_reply_rcvd\n");
MPOA_res_reply_rcvd(mesg, mpc);
break;
case MPOA_TRIGGER_RCVD:
dprintk_cont("mpoa_trigger_rcvd\n");
MPOA_trigger_rcvd(mesg, mpc);
break;
case INGRESS_PURGE_RCVD:
dprintk_cont("nhrp_purge_rcvd\n");
ingress_purge_rcvd(mesg, mpc);
break;
case EGRESS_PURGE_RCVD:
dprintk_cont("egress_purge_reply_rcvd\n");
egress_purge_rcvd(mesg, mpc);
break;
case MPS_DEATH:
dprintk_cont("mps_death\n");
mps_death(mesg, mpc);
break;
case CACHE_IMPOS_RCVD:
dprintk_cont("cache_impos_rcvd\n");
MPOA_cache_impos_rcvd(mesg, mpc);
break;
case SET_MPC_CTRL_ADDR:
dprintk_cont("set_mpc_ctrl_addr\n");
set_mpc_ctrl_addr_rcvd(mesg, mpc);
break;
case SET_MPS_MAC_ADDR:
dprintk_cont("set_mps_mac_addr\n");
set_mps_mac_addr_rcvd(mesg, mpc);
break;
case CLEAN_UP_AND_EXIT:
dprintk_cont("clean_up_and_exit\n");
clean_up(mesg, mpc, DIE);
break;
case RELOAD:
dprintk_cont("reload\n");
clean_up(mesg, mpc, RELOAD);
break;
case SET_MPC_PARAMS:
dprintk_cont("set_mpc_params\n");
mpc->parameters = mesg->content.params;
break;
default:
dprintk_cont("unknown message %d\n", mesg->type);
break;
}
kfree_skb(skb);
return 0;
}
/* Remember that this function may not do things that sleep */
int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
{
struct sk_buff *skb;
struct sock *sk;
if (mpc == NULL || !mpc->mpoad_vcc) {
pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
return -ENXIO;
}
skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
skb_put(skb, sizeof(struct k_message));
skb_copy_to_linear_data(skb, mesg, sizeof(*mesg));
atm_force_charge(mpc->mpoad_vcc, skb->truesize);
sk = sk_atm(mpc->mpoad_vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return 0;
}
static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpoa_client *mpc;
struct lec_priv *priv;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (strncmp(dev->name, "lec", 3))
return NOTIFY_DONE; /* we are only interested in lec:s */
switch (event) {
case NETDEV_REGISTER: /* a new lec device was allocated */
priv = netdev_priv(dev);
if (priv->lane_version < 2)
break;
priv->lane2_ops->associate_indicator = lane2_assoc_ind;
mpc = find_mpc_by_itfnum(priv->itfnum);
if (mpc == NULL) {
dprintk("allocating new mpc for %s\n", dev->name);
mpc = alloc_mpc();
if (mpc == NULL) {
pr_info("no new mpc");
break;
}
}
mpc->dev_num = priv->itfnum;
mpc->dev = dev;
dev_hold(dev);
dprintk("(%s) was initialized\n", dev->name);
break;
case NETDEV_UNREGISTER:
/* the lec device was deallocated */
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
dprintk("device (%s) was deallocated\n", dev->name);
stop_mpc(mpc);
dev_put(mpc->dev);
mpc->dev = NULL;
break;
case NETDEV_UP:
/* the dev was ifconfig'ed up */
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
if (mpc->mpoad_vcc != NULL)
start_mpc(mpc, dev);
break;
case NETDEV_DOWN:
/* the dev was ifconfig'ed down */
/* this means that the flow of packets from the
* upper layer stops
*/
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
if (mpc->mpoad_vcc != NULL)
stop_mpc(mpc);
break;
case NETDEV_REBOOT:
case NETDEV_CHANGE:
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
case NETDEV_GOING_DOWN:
break;
default:
break;
}
return NOTIFY_DONE;
}
/*
* Functions which are called after a message is received from mpcd.
* Msg is reused on purpose.
*/
static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
__be32 dst_ip = msg->content.in_info.in_dst_ip;
in_cache_entry *entry;
entry = mpc->in_ops->get(dst_ip, mpc);
if (entry == NULL) {
entry = mpc->in_ops->add_entry(dst_ip, mpc);
entry->entry_state = INGRESS_RESOLVING;
msg->type = SND_MPOA_RES_RQST;
msg->content.in_info = entry->ctrl_info;
msg_to_mpoad(msg, mpc);
entry->reply_wait = ktime_get_seconds();
mpc->in_ops->put(entry);
return;
}
if (entry->entry_state == INGRESS_INVALID) {
entry->entry_state = INGRESS_RESOLVING;
msg->type = SND_MPOA_RES_RQST;
msg->content.in_info = entry->ctrl_info;
msg_to_mpoad(msg, mpc);
entry->reply_wait = ktime_get_seconds();
mpc->in_ops->put(entry);
return;
}
pr_info("(%s) entry already in resolving state\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
mpc->in_ops->put(entry);
}
/*
* Things get complicated because we have to check if there's an egress
* shortcut with suitable traffic parameters we could use.
*/
static void check_qos_and_open_shortcut(struct k_message *msg,
struct mpoa_client *client,
in_cache_entry *entry)
{
__be32 dst_ip = msg->content.in_info.in_dst_ip;
struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
if (eg_entry && eg_entry->shortcut) {
if (eg_entry->shortcut->qos.txtp.traffic_class &
msg->qos.txtp.traffic_class &
(qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
entry->shortcut = eg_entry->shortcut;
else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
entry->shortcut = eg_entry->shortcut;
}
if (entry->shortcut) {
dprintk("(%s) using egress SVC to reach %pI4\n",
client->dev->name, &dst_ip);
client->eg_ops->put(eg_entry);
return;
}
}
if (eg_entry != NULL)
client->eg_ops->put(eg_entry);
/* No luck in the egress cache; we must open an ingress SVC */
msg->type = OPEN_INGRESS_SVC;
if (qos &&
(qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
msg->qos = qos->qos;
pr_info("(%s) trying to get a CBR shortcut\n",
client->dev->name);
} else
memset(&msg->qos, 0, sizeof(struct atm_qos));
msg_to_mpoad(msg, client);
}
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
__be32 dst_ip = msg->content.in_info.in_dst_ip;
in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
dprintk("(%s) ip %pI4\n",
mpc->dev->name, &dst_ip);
ddprintk("(%s) entry = %p",
mpc->dev->name, entry);
if (entry == NULL) {
pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
mpc->dev->name);
return;
}
ddprintk_cont(" entry_state = %d ", entry->entry_state);
if (entry->entry_state == INGRESS_RESOLVED) {
pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
mpc->in_ops->put(entry);
return;
}
entry->ctrl_info = msg->content.in_info;
entry->time = ktime_get_seconds();
/* Used in refreshing func from now on */
entry->reply_wait = ktime_get_seconds();
entry->refresh_time = 0;
ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
if (entry->entry_state == INGRESS_RESOLVING &&
entry->shortcut != NULL) {
entry->entry_state = INGRESS_RESOLVED;
mpc->in_ops->put(entry);
return; /* Shortcut already open... */
}
if (entry->shortcut != NULL) {
pr_info("(%s) entry->shortcut != NULL, impossible!\n",
mpc->dev->name);
mpc->in_ops->put(entry);
return;
}
check_qos_and_open_shortcut(msg, mpc, entry);
entry->entry_state = INGRESS_RESOLVED;
mpc->in_ops->put(entry);
return;
}
static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
__be32 dst_ip = msg->content.in_info.in_dst_ip;
__be32 mask = msg->ip_mask;
in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
if (entry == NULL) {
pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
mpc->dev->name, &dst_ip);
return;
}
do {
dprintk("(%s) removing an ingress entry, ip = %pI4\n",
mpc->dev->name, &dst_ip);
write_lock_bh(&mpc->ingress_lock);
mpc->in_ops->remove_entry(entry, mpc);
write_unlock_bh(&mpc->ingress_lock);
mpc->in_ops->put(entry);
entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
} while (entry != NULL);
}
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
__be32 cache_id = msg->content.eg_info.cache_id;
eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
if (entry == NULL) {
dprintk("(%s) purge for a non-existing entry\n",
mpc->dev->name);
return;
}
write_lock_irq(&mpc->egress_lock);
mpc->eg_ops->remove_entry(entry, mpc);
write_unlock_irq(&mpc->egress_lock);
mpc->eg_ops->put(entry);
}
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
{
struct sock *sk;
struct k_message *purge_msg;
struct sk_buff *skb;
dprintk("entering\n");
if (vcc == NULL) {
pr_info("vcc == NULL\n");
return;
}
skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
if (skb == NULL) {
pr_info("out of memory\n");
return;
}
skb_put(skb, sizeof(struct k_message));
memset(skb->data, 0, sizeof(struct k_message));
purge_msg = (struct k_message *)skb->data;
purge_msg->type = DATA_PLANE_PURGE;
if (entry != NULL)
purge_msg->content.eg_info = entry->ctrl_info;
atm_force_charge(vcc, skb->truesize);
sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
dprintk("exiting\n");
}
/*
* Our MPS died. Tell our daemon to send NHRP data plane purge to each
* of the egress shortcuts we have.
*/
static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
{
eg_cache_entry *entry;
dprintk("(%s)\n", mpc->dev->name);
if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
pr_info("(%s) wrong MPS\n", mpc->dev->name);
return;
}
/* FIXME: This knows too much of the cache structure */
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
purge_egress_shortcut(entry->shortcut, entry);
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
}
static void MPOA_cache_impos_rcvd(struct k_message *msg,
struct mpoa_client *mpc)
{
uint16_t holding_time;
eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
holding_time = msg->content.eg_info.holding_time;
dprintk("(%s) entry = %p, holding_time = %u\n",
mpc->dev->name, entry, holding_time);
if (entry == NULL && holding_time) {
entry = mpc->eg_ops->add_entry(msg, mpc);
mpc->eg_ops->put(entry);
return;
}
if (holding_time) {
mpc->eg_ops->update(entry, holding_time);
return;
}
write_lock_irq(&mpc->egress_lock);
mpc->eg_ops->remove_entry(entry, mpc);
write_unlock_irq(&mpc->egress_lock);
mpc->eg_ops->put(entry);
}
static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
struct mpoa_client *mpc)
{
struct lec_priv *priv;
int i, retval ;
uint8_t tlv[4 + 1 + 1 + 1 + ATM_ESA_LEN];
tlv[0] = 00; tlv[1] = 0xa0; tlv[2] = 0x3e; tlv[3] = 0x2a; /* type */
tlv[4] = 1 + 1 + ATM_ESA_LEN; /* length */
tlv[5] = 0x02; /* MPOA client */
tlv[6] = 0x00; /* number of MPS MAC addresses */
memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
dprintk("(%s) setting MPC ctrl ATM address to",
mpc->dev ? mpc->dev->name : "<unknown>");
for (i = 7; i < sizeof(tlv); i++)
dprintk_cont(" %02x", tlv[i]);
dprintk_cont("\n");
if (mpc->dev) {
priv = netdev_priv(mpc->dev);
retval = priv->lane2_ops->associate_req(mpc->dev,
mpc->dev->dev_addr,
tlv, sizeof(tlv));
if (retval == 0)
pr_info("(%s) MPOA device type TLV association failed\n",
mpc->dev->name);
retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
if (retval < 0)
pr_info("(%s) targetless LE_ARP request failed\n",
mpc->dev->name);
}
}
static void set_mps_mac_addr_rcvd(struct k_message *msg,
struct mpoa_client *client)
{
if (client->number_of_mps_macs)
kfree(client->mps_macs);
client->number_of_mps_macs = 0;
client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
if (client->mps_macs == NULL) {
pr_info("out of memory\n");
return;
}
client->number_of_mps_macs = 1;
}
/*
* purge egress cache and tell daemon to 'action' (DIE, RELOAD)
*/
static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
{
eg_cache_entry *entry;
msg->type = SND_EGRESS_PURGE;
/* FIXME: This knows too much of the cache structure */
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
msg->content.eg_info = entry->ctrl_info;
dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
msg_to_mpoad(msg, mpc);
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
msg->type = action;
msg_to_mpoad(msg, mpc);
}
static unsigned long checking_time;
static void mpc_timer_refresh(void)
{
mpc_timer.expires = jiffies + (MPC_P2 * HZ);
checking_time = mpc_timer.expires;
add_timer(&mpc_timer);
}
static void mpc_cache_check(struct timer_list *unused)
{
struct mpoa_client *mpc = mpcs;
static unsigned long previous_resolving_check_time;
static unsigned long previous_refresh_time;
while (mpc != NULL) {
mpc->in_ops->clear_count(mpc);
mpc->eg_ops->clear_expired(mpc);
if (checking_time - previous_resolving_check_time >
mpc->parameters.mpc_p4 * HZ) {
mpc->in_ops->check_resolving(mpc);
previous_resolving_check_time = checking_time;
}
if (checking_time - previous_refresh_time >
mpc->parameters.mpc_p5 * HZ) {
mpc->in_ops->refresh(mpc);
previous_refresh_time = checking_time;
}
mpc = mpc->next;
}
mpc_timer_refresh();
}
static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct atm_vcc *vcc = ATM_SD(sock);
if (cmd != ATMMPC_CTRL && cmd != ATMMPC_DATA)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case ATMMPC_CTRL:
err = atm_mpoa_mpoad_attach(vcc, (int)arg);
if (err >= 0)
sock->state = SS_CONNECTED;
break;
case ATMMPC_DATA:
err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
break;
default:
break;
}
return err;
}
static struct atm_ioctl atm_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = atm_mpoa_ioctl,
};
static __init int atm_mpoa_init(void)
{
register_atm_ioctl(&atm_ioctl_ops);
if (mpc_proc_init() != 0)
pr_info("failed to initialize /proc/mpoa\n");
pr_info("mpc.c: initialized\n");
return 0;
}
static void __exit atm_mpoa_cleanup(void)
{
struct mpoa_client *mpc, *tmp;
struct atm_mpoa_qos *qos, *nextqos;
struct lec_priv *priv;
mpc_proc_clean();
del_timer_sync(&mpc_timer);
unregister_netdevice_notifier(&mpoa_notifier);
deregister_atm_ioctl(&atm_ioctl_ops);
mpc = mpcs;
mpcs = NULL;
while (mpc != NULL) {
tmp = mpc->next;
if (mpc->dev != NULL) {
stop_mpc(mpc);
priv = netdev_priv(mpc->dev);
if (priv->lane2_ops != NULL)
priv->lane2_ops->associate_indicator = NULL;
}
ddprintk("about to clear caches\n");
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
ddprintk("caches cleared\n");
kfree(mpc->mps_macs);
memset(mpc, 0, sizeof(struct mpoa_client));
ddprintk("about to kfree %p\n", mpc);
kfree(mpc);
ddprintk("next mpc is at %p\n", tmp);
mpc = tmp;
}
qos = qos_head;
qos_head = NULL;
while (qos != NULL) {
nextqos = qos->next;
dprintk("freeing qos entry %p\n", qos);
kfree(qos);
qos = nextqos;
}
}
module_init(atm_mpoa_init);
module_exit(atm_mpoa_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/atm/mpc.c |
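For orientation only: atm_mpoa_ioctl() above is exercised by the MPOA daemon (mpcd). The sketch below is hypothetical: it shows just the two ioctl calls, assumes the control SVC to the MPS and the shortcut SVC have already been set up elsewhere, and the function and variable names are invented.

#include <sys/ioctl.h>
#include <linux/atmmpc.h>

/* Illustrative only: how mpcd would hand its VCCs over to the kernel MPOA client. */
static int mpoa_attach(int ctrl_fd, int shortcut_fd, int lec_itf, __be32 dst_ip)
{
	struct atmmpc_ioc ioc;

	/* Become the MPOA daemon for lec<lec_itf>; on success the ioctl returns lec_itf. */
	if (ioctl(ctrl_fd, ATMMPC_CTRL, lec_itf) < 0)
		return -1;

	/* Later: attach an opened shortcut SVC to a resolved ingress cache entry. */
	ioc.dev_num = lec_itf;
	ioc.ipaddr = dst_ip;
	ioc.type = MPC_SOCKET_INGRESS;
	return ioctl(shortcut_fd, ATMMPC_DATA, &ioc);
}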
// SPDX-License-Identifier: GPL-2.0
/* net/atm/signaling.c - ATM signaling */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched.h> /* jiffies and HZ */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmsap.h>
#include <linux/atmsvc.h>
#include <linux/atmdev.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "resources.h"
#include "signaling.h"
struct atm_vcc *sigd = NULL;
static void sigd_put_skb(struct sk_buff *skb)
{
if (!sigd) {
pr_debug("atmsvc: no signaling daemon\n");
kfree_skb(skb);
return;
}
atm_force_charge(sigd, skb->truesize);
skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
}
static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
{
struct sk_buff *skb;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return;
msg->type = as_error;
if (!vcc->dev->ops->change_qos)
msg->reply = -EOPNOTSUPP;
else {
/* should lock VCC */
msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
msg->reply);
if (!msg->reply)
msg->type = as_okay;
}
/*
* Should probably just turn around the old skb. But then, the buffer
* space accounting needs to follow the change too. Maybe later.
*/
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
*(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
sigd_put_skb(skb);
}
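/* ->send() of the signaling pseudo device: every message written by the
* signaling daemon ends up here.  Dispatch on msg->type and update the
* corresponding VCC (connect replies, incoming call indications, closes,
* QoS changes, party add/drop results).
*/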
static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atmsvc_msg *msg;
struct atm_vcc *session_vcc;
struct sock *sk;
msg = (struct atmsvc_msg *) skb->data;
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
vcc = *(struct atm_vcc **) &msg->vcc;
pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);
switch (msg->type) {
case as_okay:
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
vcc->local.sas_family = AF_ATMSVC;
memcpy(vcc->local.sas_addr.prv,
msg->local.sas_addr.prv, ATM_ESA_LEN);
memcpy(vcc->local.sas_addr.pub,
msg->local.sas_addr.pub, ATM_E164_LEN + 1);
}
session_vcc = vcc->session ? vcc->session : vcc;
if (session_vcc->vpi || session_vcc->vci)
break;
session_vcc->itf = msg->pvc.sap_addr.itf;
session_vcc->vpi = msg->pvc.sap_addr.vpi;
session_vcc->vci = msg->pvc.sap_addr.vci;
if (session_vcc->vpi || session_vcc->vci)
session_vcc->qos = msg->qos;
break;
case as_error:
clear_bit(ATM_VF_REGIS, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
case as_indicate:
vcc = *(struct atm_vcc **)&msg->listen_vcc;
sk = sk_atm(vcc);
pr_debug("as_indicate!!!\n");
lock_sock(sk);
if (sk_acceptq_is_full(sk)) {
sigd_enq(NULL, as_reject, vcc, NULL, NULL);
dev_kfree_skb(skb);
goto as_indicate_complete;
}
sk_acceptq_added(sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
return 0;
case as_close:
set_bit(ATM_VF_RELEASED, &vcc->flags);
vcc_release_async(vcc, msg->reply);
goto out;
case as_modify:
modify_qos(vcc, msg);
break;
case as_addparty:
case as_dropparty:
WRITE_ONCE(sk->sk_err_soft, -msg->reply);
/* < 0 failure, otherwise ep_ref */
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
default:
pr_alert("bad message type %d\n", (int)msg->type);
return -EINVAL;
}
sk->sk_state_change(sk);
out:
dev_kfree_skb(skb);
return 0;
}
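/* Build an atmsvc_msg and queue it to the signaling daemon.  The skb
* allocation loops with schedule() until it succeeds, so callers must be
* able to sleep.  Each new point-to-multipoint connect gets the next
* session number.
*/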
void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
int reply)
{
struct sk_buff *skb;
struct atmsvc_msg *msg;
static unsigned int session = 0;
pr_debug("%d (0x%p)\n", (int)type, vcc);
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
msg = skb_put_zero(skb, sizeof(struct atmsvc_msg));
msg->type = type;
*(struct atm_vcc **) &msg->vcc = vcc;
*(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
msg->reply = reply;
if (qos)
msg->qos = *qos;
if (vcc)
msg->sap = vcc->sap;
if (svc)
msg->svc = *svc;
if (vcc)
msg->local = vcc->local;
if (pvc)
msg->pvc = *pvc;
if (vcc) {
if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
msg->session = ++session;
/* every new pmp connect gets the next session number */
}
sigd_put_skb(skb);
if (vcc)
set_bit(ATM_VF_REGIS, &vcc->flags);
}
void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc)
{
sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
/* other ISP applications may use "reply" */
}
static void purge_vcc(struct atm_vcc *vcc)
{
if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
!test_bit(ATM_VF_META, &vcc->flags)) {
set_bit(ATM_VF_RELEASED, &vcc->flags);
clear_bit(ATM_VF_REGIS, &vcc->flags);
vcc_release_async(vcc, -EUNATCH);
}
}
static void sigd_close(struct atm_vcc *vcc)
{
struct sock *s;
int i;
pr_debug("\n");
sigd = NULL;
if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
pr_err("closing with requests pending\n");
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
read_lock(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
sk_for_each(s, head) {
vcc = atm_sk(s);
purge_vcc(vcc);
}
}
read_unlock(&vcc_sklist_lock);
}
static const struct atmdev_ops sigd_dev_ops = {
.close = sigd_close,
.send = sigd_send
};
static struct atm_dev sigd_dev = {
.ops = &sigd_dev_ops,
.type = "sig",
.number = 999,
.lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
};
int sigd_attach(struct atm_vcc *vcc)
{
if (sigd)
return -EADDRINUSE;
pr_debug("\n");
sigd = vcc;
vcc->dev = &sigd_dev;
vcc_insert_socket(sk_atm(vcc));
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
return 0;
}
| linux-master | net/atm/signaling.c |
// SPDX-License-Identifier: GPL-2.0
/* net/atm/pvc.c - ATM PVC sockets */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h> /* ATM devices */
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <net/sock.h> /* for sock_no_* */
#include "resources.h" /* devs and vccs */
#include "common.h" /* common for PVCs and SVCs */
static int pvc_shutdown(struct socket *sock, int how)
{
return 0;
}
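/* For PVC sockets bind() and connect() are the same operation: the
* sockaddr carries the interface/VPI/VCI triple and vcc_connect() does
* the actual work.  A partially bound VCC keeps any VPI/VCI value that
* was already fixed.
*/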
static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc;
int error;
if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
return -EINVAL;
addr = (struct sockaddr_atmpvc *)sockaddr;
if (addr->sap_family != AF_ATMPVC)
return -EAFNOSUPPORT;
lock_sock(sk);
vcc = ATM_SD(sock);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
if (vcc->vpi != ATM_VPI_UNSPEC)
addr->sap_addr.vpi = vcc->vpi;
if (vcc->vci != ATM_VCI_UNSPEC)
addr->sap_addr.vci = vcc->vci;
}
error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
addr->sap_addr.vci);
out:
release_sock(sk);
return error;
}
static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
return pvc_bind(sock, sockaddr, sockaddr_len);
}
static int pvc_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_setsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_getsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
int peer)
{
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
addr = (struct sockaddr_atmpvc *)sockaddr;
memset(addr, 0, sizeof(*addr));
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
addr->sap_addr.vci = vcc->vci;
return sizeof(struct sockaddr_atmpvc);
}
static const struct proto_ops pvc_proto_ops = {
.family = PF_ATMPVC,
.owner = THIS_MODULE,
.release = vcc_release,
.bind = pvc_bind,
.connect = pvc_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pvc_getname,
.poll = vcc_poll,
.ioctl = vcc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vcc_compat_ioctl,
#endif
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = pvc_shutdown,
.setsockopt = pvc_setsockopt,
.getsockopt = pvc_getsockopt,
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
};
static int pvc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
if (net != &init_net)
return -EAFNOSUPPORT;
sock->ops = &pvc_proto_ops;
return vcc_create(net, sock, protocol, PF_ATMPVC, kern);
}
static const struct net_proto_family pvc_family_ops = {
.family = PF_ATMPVC,
.create = pvc_create,
.owner = THIS_MODULE,
};
/*
* Initialize the ATM PVC protocol family
*/
int __init atmpvc_init(void)
{
return sock_register(&pvc_family_ops);
}
void atmpvc_exit(void)
{
sock_unregister(PF_ATMPVC);
}
| linux-master | net/atm/pvc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ioctl handler
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <linux/uaccess.h>
#include "br_private.h"
static int get_bridge_ifindices(struct net *net, int *indices, int num)
{
struct net_device *dev;
int i = 0;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (i >= num)
break;
if (netif_is_bridge_master(dev))
indices[i++] = dev->ifindex;
}
rcu_read_unlock();
return i;
}
/* called with RTNL */
static void get_port_ifindices(struct net_bridge *br, int *ifindices, int num)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->port_no < num)
ifindices[p->port_no] = p->dev->ifindex;
}
}
/*
* Format up to a page worth of forwarding table entries
* userbuf -- where to copy result
* maxnum -- maximum number of entries desired
* (limited to a page for sanity)
* offset -- number of records to skip
*/
static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
unsigned long maxnum, unsigned long offset)
{
int num;
void *buf;
size_t size;
/* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */
if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry))
maxnum = PAGE_SIZE/sizeof(struct __fdb_entry);
size = maxnum * sizeof(struct __fdb_entry);
buf = kmalloc(size, GFP_USER);
if (!buf)
return -ENOMEM;
num = br_fdb_fillbuf(br, buf, maxnum, offset);
if (num > 0) {
if (copy_to_user(userbuf, buf,
array_size(num, sizeof(struct __fdb_entry))))
num = -EFAULT;
}
kfree(buf);
return num;
}
/* called with RTNL */
static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
{
struct net *net = dev_net(br->dev);
struct net_device *dev;
int ret;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
dev = __dev_get_by_index(net, ifindex);
if (dev == NULL)
return -EINVAL;
if (isadd)
ret = br_add_if(br, dev, NULL);
else
ret = br_del_if(br, dev);
return ret;
}
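/* The legacy brctl interface passes an array of unsigned longs; copy up
* to BR_UARGS_MAX of them from userspace, widening from 32-bit values on
* the compat syscall path, and hand back args[1] as a user pointer for
* the commands that need one.
*/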
#define BR_UARGS_MAX 4
static int br_dev_read_uargs(unsigned long *args, size_t nr_args,
void __user **argp, void __user *data)
{
int ret;
if (nr_args < 2 || nr_args > BR_UARGS_MAX)
return -EINVAL;
if (in_compat_syscall()) {
unsigned int cargs[BR_UARGS_MAX];
int i;
ret = copy_from_user(cargs, data, nr_args * sizeof(*cargs));
if (ret)
goto fault;
for (i = 0; i < nr_args; ++i)
args[i] = cargs[i];
*argp = compat_ptr(args[1]);
} else {
ret = copy_from_user(args, data, nr_args * sizeof(*args));
if (ret)
goto fault;
*argp = (void __user *)args[1];
}
return 0;
fault:
return -EFAULT;
}
/*
* Legacy ioctl's through SIOCDEVPRIVATE
* This interface is deprecated because it was too difficult
* to do the translation for 32/64bit ioctl compatibility.
*/
int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p = NULL;
unsigned long args[4];
void __user *argp;
int ret;
ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data);
if (ret)
return ret;
switch (args[0]) {
case BRCTL_ADD_IF:
case BRCTL_DEL_IF:
return add_del_if(br, args[1], args[0] == BRCTL_ADD_IF);
case BRCTL_GET_BRIDGE_INFO:
{
struct __bridge_info b;
memset(&b, 0, sizeof(struct __bridge_info));
rcu_read_lock();
memcpy(&b.designated_root, &br->designated_root, 8);
memcpy(&b.bridge_id, &br->bridge_id, 8);
b.root_path_cost = br->root_path_cost;
b.max_age = jiffies_to_clock_t(br->max_age);
b.hello_time = jiffies_to_clock_t(br->hello_time);
b.forward_delay = br->forward_delay;
b.bridge_max_age = br->bridge_max_age;
b.bridge_hello_time = br->bridge_hello_time;
b.bridge_forward_delay = jiffies_to_clock_t(br->bridge_forward_delay);
b.topology_change = br->topology_change;
b.topology_change_detected = br->topology_change_detected;
b.root_port = br->root_port;
b.stp_enabled = (br->stp_enabled != BR_NO_STP);
b.ageing_time = jiffies_to_clock_t(br->ageing_time);
b.hello_timer_value = br_timer_value(&br->hello_timer);
b.tcn_timer_value = br_timer_value(&br->tcn_timer);
b.topology_change_timer_value = br_timer_value(&br->topology_change_timer);
b.gc_timer_value = br_timer_value(&br->gc_work.timer);
rcu_read_unlock();
if (copy_to_user((void __user *)args[1], &b, sizeof(b)))
return -EFAULT;
return 0;
}
case BRCTL_GET_PORT_LIST:
{
int num, *indices;
num = args[2];
if (num < 0)
return -EINVAL;
if (num == 0)
num = 256;
if (num > BR_MAX_PORTS)
num = BR_MAX_PORTS;
indices = kcalloc(num, sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
get_port_ifindices(br, indices, num);
if (copy_to_user(argp, indices, array_size(num, sizeof(int))))
num = -EFAULT;
kfree(indices);
return num;
}
case BRCTL_SET_BRIDGE_FORWARD_DELAY:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
ret = br_set_forward_delay(br, args[1]);
break;
case BRCTL_SET_BRIDGE_HELLO_TIME:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
ret = br_set_hello_time(br, args[1]);
break;
case BRCTL_SET_BRIDGE_MAX_AGE:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
ret = br_set_max_age(br, args[1]);
break;
case BRCTL_SET_AGEING_TIME:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
ret = br_set_ageing_time(br, args[1]);
break;
case BRCTL_GET_PORT_INFO:
{
struct __port_info p;
struct net_bridge_port *pt;
rcu_read_lock();
if ((pt = br_get_port(br, args[2])) == NULL) {
rcu_read_unlock();
return -EINVAL;
}
memset(&p, 0, sizeof(struct __port_info));
memcpy(&p.designated_root, &pt->designated_root, 8);
memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
p.port_id = pt->port_id;
p.designated_port = pt->designated_port;
p.path_cost = pt->path_cost;
p.designated_cost = pt->designated_cost;
p.state = pt->state;
p.top_change_ack = pt->topology_change_ack;
p.config_pending = pt->config_pending;
p.message_age_timer_value = br_timer_value(&pt->message_age_timer);
p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer);
p.hold_timer_value = br_timer_value(&pt->hold_timer);
rcu_read_unlock();
if (copy_to_user(argp, &p, sizeof(p)))
return -EFAULT;
return 0;
}
case BRCTL_SET_BRIDGE_STP_STATE:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
ret = br_stp_set_enabled(br, args[1], NULL);
break;
case BRCTL_SET_BRIDGE_PRIORITY:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
br_stp_set_bridge_priority(br, args[1]);
ret = 0;
break;
case BRCTL_SET_PORT_PRIORITY:
{
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
ret = br_stp_set_port_priority(p, args[2]);
spin_unlock_bh(&br->lock);
break;
}
case BRCTL_SET_PATH_COST:
{
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
ret = br_stp_set_path_cost(p, args[2]);
spin_unlock_bh(&br->lock);
break;
}
case BRCTL_GET_FDB_ENTRIES:
return get_fdb_entries(br, argp, args[2], args[3]);
default:
ret = -EOPNOTSUPP;
}
if (!ret) {
if (p)
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
else
netdev_state_change(br->dev);
}
return ret;
}
static int old_deviceless(struct net *net, void __user *data)
{
unsigned long args[3];
void __user *argp;
int ret;
ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data);
if (ret)
return ret;
switch (args[0]) {
case BRCTL_GET_VERSION:
return BRCTL_VERSION;
case BRCTL_GET_BRIDGES:
{
int *indices;
int ret = 0;
if (args[2] >= 2048)
return -ENOMEM;
indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
args[2] = get_bridge_ifindices(net, indices, args[2]);
ret = copy_to_user(argp, indices,
array_size(args[2], sizeof(int)))
? -EFAULT : args[2];
kfree(indices);
return ret;
}
case BRCTL_ADD_BRIDGE:
case BRCTL_DEL_BRIDGE:
{
char buf[IFNAMSIZ];
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(buf, argp, IFNAMSIZ))
return -EFAULT;
buf[IFNAMSIZ-1] = 0;
if (args[0] == BRCTL_ADD_BRIDGE)
return br_add_bridge(net, buf);
return br_del_bridge(net, buf);
}
}
return -EOPNOTSUPP;
}
int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg)
{
int ret = -EOPNOTSUPP;
rtnl_lock();
switch (cmd) {
case SIOCGIFBR:
case SIOCSIFBR:
ret = old_deviceless(net, uarg);
break;
case SIOCBRADDBR:
case SIOCBRDELBR:
{
char buf[IFNAMSIZ];
if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
if (copy_from_user(buf, uarg, IFNAMSIZ)) {
ret = -EFAULT;
break;
}
buf[IFNAMSIZ-1] = 0;
if (cmd == SIOCBRADDBR)
ret = br_add_bridge(net, buf);
else
ret = br_del_bridge(net, buf);
}
break;
case SIOCBRADDIF:
case SIOCBRDELIF:
ret = add_del_if(br, ifr->ifr_ifindex, cmd == SIOCBRADDIF);
break;
}
rtnl_unlock();
return ret;
}
| linux-master | net/bridge/br_ioctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Forwarding database
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"
static const struct rhashtable_params br_fdb_rht_params = {
.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
.key_offset = offsetof(struct net_bridge_fdb_entry, key),
.key_len = sizeof(struct net_bridge_fdb_key),
.automatic_shrinking = true,
};
static struct kmem_cache *br_fdb_cache __read_mostly;
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
sizeof(struct net_bridge_fdb_entry),
0,
SLAB_HWCACHE_ALIGN, NULL);
if (!br_fdb_cache)
return -ENOMEM;
return 0;
}
void br_fdb_fini(void)
{
kmem_cache_destroy(br_fdb_cache);
}
int br_fdb_hash_init(struct net_bridge *br)
{
return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}
void br_fdb_hash_fini(struct net_bridge *br)
{
rhashtable_destroy(&br->fdb_hash_tbl);
}
/* if topology_changing then use forward_delay (default 15 sec)
* otherwise keep longer (default 5 minutes)
*/
static inline unsigned long hold_time(const struct net_bridge *br)
{
return br->topology_change ? br->forward_delay : br->ageing_time;
}
static inline int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
!test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
static void fdb_rcu_free(struct rcu_head *head)
{
struct net_bridge_fdb_entry *ent
= container_of(head, struct net_bridge_fdb_entry, rcu);
kmem_cache_free(br_fdb_cache, ent);
}
static int fdb_to_nud(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
if (test_bit(BR_FDB_LOCAL, &fdb->flags))
return NUD_PERMANENT;
else if (test_bit(BR_FDB_STATIC, &fdb->flags))
return NUD_NOARP;
else if (has_expired(br, fdb))
return NUD_STALE;
else
return NUD_REACHABLE;
}
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb,
u32 portid, u32 seq, int type, unsigned int flags)
{
const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
u32 ext_flags = 0;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = 0;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(br, fdb);
if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
ndm->ndm_flags |= NTF_OFFLOADED;
if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
ndm->ndm_flags |= NTF_EXT_LEARNED;
if (test_bit(BR_FDB_STICKY, &fdb->flags))
ndm->ndm_flags |= NTF_STICKY;
if (test_bit(BR_FDB_LOCKED, &fdb->flags))
ext_flags |= NTF_EXT_LOCKED;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
&fdb->key.vlan_id))
goto nla_put_failure;
if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
u8 notify_bits = FDB_NOTIFY_BIT;
if (!nest)
goto nla_put_failure;
if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
nla_nest_cancel(skb, nest);
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t fdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ nla_total_size(sizeof(struct nda_cacheinfo))
+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, int type,
bool swdev_notify)
{
struct net *net = dev_net(br->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_key key;
WARN_ON_ONCE(!rcu_read_lock_held());
key.vlan_id = vid;
memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
lockdep_assert_held_once(&br->hash_lock);
rcu_read_lock();
fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
rcu_read_unlock();
return fdb;
}
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *f;
struct net_device *dev = NULL;
struct net_bridge *br;
ASSERT_RTNL();
if (!netif_is_bridge_master(br_dev))
return NULL;
br = netdev_priv(br_dev);
rcu_read_lock();
f = br_fdb_find_rcu(br, addr, vid);
if (f && f->dst)
dev = f->dst->dev;
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
* are then updated with the new information.
* Called under RTNL.
*/
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
int err;
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p)) {
err = dev_uc_add(p->dev, addr);
if (err)
goto undo;
}
}
return;
undo:
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
/* When a static FDB entry is deleted, the HW address from that entry is
* also removed from the bridge private HW address list and all the ports
* are updated with the needed information.
* Called under RTNL.
*/
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
bool swdev_notify)
{
trace_fdb_delete(br, f);
if (test_bit(BR_FDB_STATIC, &f->flags))
fdb_del_hw_addr(br, f->key.addr.addr);
hlist_del_init_rcu(&f->fdb_node);
rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
br_fdb_rht_params);
fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
call_rcu(&f->rcu, fdb_rcu_free);
}
/* Delete a local entry if no other port had the same address. */
static void fdb_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_fdb_entry *f)
{
const unsigned char *addr = f->key.addr.addr;
struct net_bridge_vlan_group *vg;
const struct net_bridge_vlan *v;
struct net_bridge_port *op;
u16 vid = f->key.vlan_id;
/* Maybe another port has same hw addr? */
list_for_each_entry(op, &br->port_list, list) {
vg = nbp_vlan_group(op);
if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
(!vid || br_vlan_find(vg, vid))) {
f->dst = op;
clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
}
vg = br_vlan_group(br);
v = br_vlan_find(vg, vid);
/* Maybe bridge device has same hw addr? */
if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
(!vid || (v && br_vlan_should_use(v)))) {
f->dst = NULL;
clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
fdb_delete(br, f, true);
}
void br_fdb_find_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
struct net_bridge_fdb_entry *f;
spin_lock_bh(&br->hash_lock);
f = br_fdb_find(br, addr, vid);
if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
!test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
fdb_delete_local(br, p, f);
spin_unlock_bh(&br->hash_lock);
}
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
struct net_bridge_port *source,
const unsigned char *addr,
__u16 vid,
unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
int err;
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (!fdb)
return NULL;
memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
WRITE_ONCE(fdb->dst, source);
fdb->key.vlan_id = vid;
fdb->flags = flags;
fdb->updated = fdb->used = jiffies;
err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
br_fdb_rht_params);
if (err) {
kmem_cache_free(br_fdb_cache, fdb);
return NULL;
}
hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
return fdb;
}
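/* Create (or re-create) a BR_FDB_LOCAL + BR_FDB_STATIC entry for one of
* the bridge's own addresses.  An existing learned entry for the same
* address is replaced, and the address is synced to the unicast filters
* of the non-promiscuous ports.
*/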
static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
struct net_bridge_fdb_entry *fdb;
if (!is_valid_ether_addr(addr))
return -EINVAL;
fdb = br_fdb_find(br, addr, vid);
if (fdb) {
/* it is okay to have multiple ports with same
* address, just use the first one.
*/
if (test_bit(BR_FDB_LOCAL, &fdb->flags))
return 0;
br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
source ? source->dev->name : br->dev->name, addr, vid);
fdb_delete(br, fdb, true);
}
fdb = fdb_create(br, source, addr, vid,
BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
if (!fdb)
return -ENOMEM;
fdb_add_hw_addr(br, addr);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
return 0;
}
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
spin_lock_bh(&br->hash_lock);
vg = nbp_vlan_group(p);
hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
!test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
/* delete old one */
fdb_delete_local(br, p, f);
/* if this port has no vlan information
* configured, we can safely be done at
* this point.
*/
if (!vg || !vg->num_vlans)
goto insert;
}
}
insert:
/* insert new address, may fail if invalid address or dup. */
fdb_add_local(br, p, newaddr, 0);
if (!vg || !vg->num_vlans)
goto done;
/* Now add entries for every VLAN configured on the port.
* This function runs under RTNL so the bitmap will not change
* from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist)
fdb_add_local(br, p, newaddr, v->vid);
done:
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
struct net_bridge_vlan *v;
spin_lock_bh(&br->hash_lock);
/* If old entry was unassociated with any port, then delete it. */
f = br_fdb_find(br, br->dev->dev_addr, 0);
if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
!f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
fdb_add_local(br, NULL, newaddr, 0);
vg = br_vlan_group(br);
if (!vg || !vg->num_vlans)
goto out;
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
f = br_fdb_find(br, br->dev->dev_addr, v->vid);
if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
!f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
fdb_add_local(br, NULL, newaddr, v->vid);
}
out:
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
gc_work.work);
struct net_bridge_fdb_entry *f = NULL;
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
* delayed freeing allowing us to continue traversing
*/
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
unsigned long this_timer = f->updated + delay;
if (test_bit(BR_FDB_STATIC, &f->flags) ||
test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
if (time_after(this_timer, now))
work_delay = min(work_delay,
this_timer - now);
else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
&f->flags))
fdb_notify(br, f, RTM_NEWNEIGH, false);
}
continue;
}
if (time_after(this_timer, now)) {
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node))
fdb_delete(br, f, true);
spin_unlock_bh(&br->hash_lock);
}
}
rcu_read_unlock();
/* Cleanup minimum 10 milliseconds apart */
work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
static bool __fdb_flush_matches(const struct net_bridge *br,
const struct net_bridge_fdb_entry *f,
const struct net_bridge_fdb_flush_desc *desc)
{
const struct net_bridge_port *dst = READ_ONCE(f->dst);
int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
return false;
if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
return false;
if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
return false;
return true;
}
/* Flush forwarding database entries matching the description */
void br_fdb_flush(struct net_bridge *br,
const struct net_bridge_fdb_flush_desc *desc)
{
struct net_bridge_fdb_entry *f;
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
if (!__fdb_flush_matches(br, f, desc))
continue;
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node))
fdb_delete(br, f, true);
spin_unlock_bh(&br->hash_lock);
}
rcu_read_unlock();
}
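/* Helpers translating the ndmsg state/flag bits used by the netlink bulk
* delete API into the bridge's internal BR_FDB_* flag bits, so a flush
* descriptor can be matched against fdb->flags directly.
*/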
static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
{
unsigned long flags = 0;
if (ndm_state & NUD_PERMANENT)
__set_bit(BR_FDB_LOCAL, &flags);
if (ndm_state & NUD_NOARP)
__set_bit(BR_FDB_STATIC, &flags);
return flags;
}
static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
{
unsigned long flags = 0;
if (ndm_flags & NTF_USE)
__set_bit(BR_FDB_ADDED_BY_USER, &flags);
if (ndm_flags & NTF_EXT_LEARNED)
__set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
if (ndm_flags & NTF_OFFLOADED)
__set_bit(BR_FDB_OFFLOADED, &flags);
if (ndm_flags & NTF_STICKY)
__set_bit(BR_FDB_STICKY, &flags);
return flags;
}
static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
int ifindex,
struct netlink_ext_ack *extack)
{
const struct net_device *dev;
dev = __dev_get_by_index(dev_net(br->dev), ifindex);
if (!dev) {
NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
return -ENODEV;
}
if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
return -EINVAL;
}
if (netif_is_bridge_master(dev) && dev != br->dev) {
NL_SET_ERR_MSG_MOD(extack,
"Flush bridge device does not match target bridge device");
return -EINVAL;
}
if (netif_is_bridge_port(dev)) {
struct net_bridge_port *p = br_port_get_rtnl(dev);
if (p->br != br) {
NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
}
}
return 0;
}
int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, u16 vid,
struct netlink_ext_ack *extack)
{
u8 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
struct net_bridge_fdb_flush_desc desc = { .vlan_id = vid };
struct net_bridge_port *p = NULL;
struct net_bridge *br;
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
} else {
p = br_port_get_rtnl(dev);
if (!p) {
NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
return -EINVAL;
}
br = p->br;
}
if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
return -EINVAL;
}
if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
return -EINVAL;
}
desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
if (tb[NDA_NDM_STATE_MASK]) {
u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);
desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
}
if (tb[NDA_NDM_FLAGS_MASK]) {
u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);
desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
}
if (tb[NDA_IFINDEX]) {
int err, ifidx = nla_get_s32(tb[NDA_IFINDEX]);
err = __fdb_flush_validate_ifindex(br, ifidx, extack);
if (err)
return err;
desc.port_ifindex = ifidx;
} else if (p) {
/* flush was invoked with port device and NTF_MASTER */
desc.port_ifindex = p->dev->ifindex;
}
br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
br_fdb_flush(br, &desc);
return 0;
}
/* Flush all entries referring to a specific port.
* If do_all is set, also flush static entries.
* If vid is set, only delete entries that match the vlan_id.
*/
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p,
u16 vid,
int do_all)
{
struct net_bridge_fdb_entry *f;
struct hlist_node *tmp;
spin_lock_bh(&br->hash_lock);
hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
if (f->dst != p)
continue;
if (!do_all)
if (test_bit(BR_FDB_STATIC, &f->flags) ||
(test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
!test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
(vid && f->key.vlan_id != vid))
continue;
if (test_bit(BR_FDB_LOCAL, &f->flags))
fdb_delete_local(br, p, f);
else
fdb_delete(br, f, true);
}
spin_unlock_bh(&br->hash_lock);
}
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
* if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
struct net_bridge_fdb_entry *fdb;
struct net_bridge_port *port;
int ret;
rcu_read_lock();
port = br_port_get_rcu(dev);
if (!port)
ret = 0;
else {
const struct net_bridge_port *dst = NULL;
fdb = br_fdb_find_rcu(port->br, addr, 0);
if (fdb)
dst = READ_ONCE(fdb->dst);
ret = dst && dst->dev != dev &&
dst->state == BR_STATE_FORWARDING;
}
rcu_read_unlock();
return ret;
}
#endif /* CONFIG_ATM_LANE */
/*
* Fill buffer with forwarding table records in
* the API format.
*/
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long maxnum, unsigned long skip)
{
struct net_bridge_fdb_entry *f;
struct __fdb_entry *fe = buf;
int num = 0;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
if (num >= maxnum)
break;
if (has_expired(br, f))
continue;
/* ignore pseudo entry for local MAC address */
if (!f->dst)
continue;
if (skip) {
--skip;
continue;
}
/* convert from internal format to API */
memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
/* due to ABI compat need to split into hi/lo */
fe->port_no = f->dst->port_no;
fe->port_hi = f->dst->port_no >> 8;
fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
if (!test_bit(BR_FDB_STATIC, &f->flags))
fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
++fe;
++num;
}
rcu_read_unlock();
return num;
}
/* Add entry for local address of interface */
int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
int ret;
spin_lock_bh(&br->hash_lock);
ret = fdb_add_local(br, source, addr, vid);
spin_unlock_bh(&br->hash_lock);
return ret;
}
/* returns true if the fdb was modified */
static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
{
return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
}
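/* Learning path, called from packet processing under RCU.  An existing
* entry is refreshed (and possibly re-pointed at a new source port)
* without taking the hash lock; the lock is only taken when a new entry
* has to be created.
*/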
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
/* some users want to always flood. */
if (hold_time(br) == 0)
return;
fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
if (net_ratelimit())
br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
source->dev->name, addr, vid);
} else {
unsigned long now = jiffies;
bool fdb_modified = false;
if (now != fdb->updated) {
fdb->updated = now;
fdb_modified = __fdb_mark_active(fdb);
}
/* fastpath: update of existing entry */
if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags)))
clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags);
/* Clear locked flag when roaming to an
* unlocked port.
*/
if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
clear_bit(BR_FDB_LOCKED, &fdb->flags);
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
if (unlikely(fdb_modified)) {
trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
}
} else {
spin_lock(&br->hash_lock);
fdb = fdb_create(br, source, addr, vid, flags);
if (fdb) {
trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
/* else we lost the race and someone else inserted
* it first; don't bother updating
*/
spin_unlock(&br->hash_lock);
}
}
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
int *idx)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_fdb_entry *f;
int err = 0;
if (!netif_is_bridge_master(dev))
return err;
if (!filter_dev) {
err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
if (err < 0)
return err;
}
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
if (*idx < cb->args[2])
goto skip;
if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
if (filter_dev != dev)
goto skip;
/* !f->dst is a special case for bridge
* It means the MAC belongs to the bridge
* Therefore need a little more filtering
* we only want to dump the !f->dst case
*/
if (f->dst)
goto skip;
}
if (!filter_dev && f->dst)
goto skip;
err = fdb_fill_info(skb, br, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI);
if (err < 0)
break;
skip:
*idx += 1;
}
rcu_read_unlock();
return err;
}
int br_fdb_get(struct sk_buff *skb,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_fdb_entry *f;
int err = 0;
rcu_read_lock();
f = br_fdb_find_rcu(br, addr, vid);
if (!f) {
NL_SET_ERR_MSG(extack, "Fdb entry not found");
err = -ENOENT;
goto errout;
}
err = fdb_fill_info(skb, br, f, portid, seq,
RTM_NEWNEIGH, 0);
errout:
rcu_read_unlock();
return err;
}
/* returns true if the fdb is modified */
static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
{
bool modified = false;
/* allow to mark an entry as inactive, usually done on creation */
if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
modified = true;
if ((notify & FDB_NOTIFY_BIT) &&
!test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
/* enabled activity tracking */
modified = true;
} else if (!(notify & FDB_NOTIFY_BIT) &&
test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
/* disabled activity tracking, clear notify state */
clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
modified = true;
}
return modified;
}
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
struct nlattr *nfea_tb[])
{
bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
struct net_bridge_fdb_entry *fdb;
u16 state = ndm->ndm_state;
bool modified = false;
u8 notify = 0;
/* If the port cannot learn allow only local and static entries */
if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
!(source->state == BR_STATE_LEARNING ||
source->state == BR_STATE_FORWARDING))
return -EPERM;
if (!source && !(state & NUD_PERMANENT)) {
pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
br->dev->name);
return -EINVAL;
}
if (is_sticky && (state & NUD_PERMANENT))
return -EINVAL;
if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
(notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
return -EINVAL;
}
fdb = br_fdb_find(br, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
fdb = fdb_create(br, source, addr, vid, 0);
if (!fdb)
return -ENOMEM;
modified = true;
} else {
if (flags & NLM_F_EXCL)
return -EEXIST;
if (READ_ONCE(fdb->dst) != source) {
WRITE_ONCE(fdb->dst, source);
modified = true;
}
}
if (fdb_to_nud(br, fdb) != state) {
if (state & NUD_PERMANENT) {
set_bit(BR_FDB_LOCAL, &fdb->flags);
if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
fdb_add_hw_addr(br, addr);
} else if (state & NUD_NOARP) {
clear_bit(BR_FDB_LOCAL, &fdb->flags);
if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
fdb_add_hw_addr(br, addr);
} else {
clear_bit(BR_FDB_LOCAL, &fdb->flags);
if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
fdb_del_hw_addr(br, addr);
}
modified = true;
}
if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
change_bit(BR_FDB_STICKY, &fdb->flags);
modified = true;
}
if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
modified = true;
if (fdb_handle_notify(fdb, notify))
modified = true;
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
fdb->used = jiffies;
if (modified) {
if (refresh)
fdb->updated = jiffies;
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
return 0;
}
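/* Dispatch a single RTM_NEWNEIGH request: NTF_USE refreshes the entry
* through the normal learning path, NTF_EXT_LEARNED goes through the
* external/switchdev learn code, and everything else becomes a regular
* static/permanent entry added under the hash lock.
*/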
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
struct net_bridge_port *p, const unsigned char *addr,
u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
struct netlink_ext_ack *extack)
{
int err = 0;
if (ndm->ndm_flags & NTF_USE) {
if (!p) {
pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
br->dev->name);
return -EINVAL;
}
if (!nbp_state_should_learn(p))
return 0;
local_bh_disable();
rcu_read_lock();
br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
rcu_read_unlock();
local_bh_enable();
} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
NL_SET_ERR_MSG_MOD(extack,
"FDB entry towards bridge must be permanent");
return -EINVAL;
}
err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
} else {
spin_lock_bh(&br->hash_lock);
err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
spin_unlock_bh(&br->hash_lock);
}
return err;
}
static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
[NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 },
[NFEA_DONT_REFRESH] = { .type = NLA_FLAG },
};
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags,
struct netlink_ext_ack *extack)
{
struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
struct net_bridge_vlan *v;
struct net_bridge *br = NULL;
u32 ext_flags = 0;
int err = 0;
trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
return -EINVAL;
}
if (is_zero_ether_addr(addr)) {
pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
return -EINVAL;
}
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group(br);
} else {
p = br_port_get_rtnl(dev);
if (!p) {
pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
dev->name);
return -EINVAL;
}
br = p->br;
vg = nbp_vlan_group(p);
}
if (tb[NDA_FLAGS_EXT])
ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);
if (ext_flags & NTF_EXT_LOCKED) {
NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
return -EINVAL;
}
if (tb[NDA_FDB_EXT_ATTRS]) {
attr = tb[NDA_FDB_EXT_ATTRS];
err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
br_nda_fdb_pol, extack);
if (err)
return err;
} else {
memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
}
if (vid) {
v = br_vlan_find(vg, vid);
if (!v || !br_vlan_should_use(v)) {
pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
return -EINVAL;
}
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
extack);
} else {
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
extack);
if (err || !vg || !vg->num_vlans)
goto out;
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
nfea_tb, extack);
if (err)
goto out;
}
}
out:
return err;
}
static int fdb_delete_by_addr_and_port(struct net_bridge *br,
const struct net_bridge_port *p,
const u8 *addr, u16 vlan)
{
struct net_bridge_fdb_entry *fdb;
fdb = br_fdb_find(br, addr, vlan);
if (!fdb || READ_ONCE(fdb->dst) != p)
return -ENOENT;
fdb_delete(br, fdb, true);
return 0;
}
static int __br_fdb_delete(struct net_bridge *br,
const struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
int err;
spin_lock_bh(&br->hash_lock);
err = fdb_delete_by_addr_and_port(br, p, addr, vid);
spin_unlock_bh(&br->hash_lock);
return err;
}
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
struct net_bridge_vlan *v;
struct net_bridge *br;
int err;
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group(br);
} else {
p = br_port_get_rtnl(dev);
if (!p) {
pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
dev->name);
return -EINVAL;
}
vg = nbp_vlan_group(p);
br = p->br;
}
if (vid) {
v = br_vlan_find(vg, vid);
if (!v) {
pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
return -EINVAL;
}
err = __br_fdb_delete(br, p, addr, vid);
} else {
err = -ENOENT;
err &= __br_fdb_delete(br, p, addr, 0);
if (!vg || !vg->num_vlans)
return err;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
err &= __br_fdb_delete(br, p, addr, v->vid);
}
}
return err;
}
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
struct net_bridge_fdb_entry *f, *tmp;
int err = 0;
ASSERT_RTNL();
/* the key here is that static entries change only under rtnl */
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
/* We only care for static entries */
if (!test_bit(BR_FDB_STATIC, &f->flags))
continue;
err = dev_uc_add(p->dev, f->key.addr.addr);
if (err)
goto rollback;
}
done:
rcu_read_unlock();
return err;
rollback:
hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
/* We only care for static entries */
if (!test_bit(BR_FDB_STATIC, &tmp->flags))
continue;
if (tmp == f)
break;
dev_uc_del(p->dev, tmp->key.addr.addr);
}
goto done;
}
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
struct net_bridge_fdb_entry *f;
ASSERT_RTNL();
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
/* We only care for static entries */
if (!test_bit(BR_FDB_STATIC, &f->flags))
continue;
dev_uc_del(p->dev, f->key.addr.addr);
}
rcu_read_unlock();
}
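/* Add or refresh an entry on behalf of an external (switchdev) learning
* source.  "locked" entries are only accepted on ports with BR_PORT_MAB
* set, and an existing software-learned entry can be taken over by
* setting BR_FDB_ADDED_BY_EXT_LEARN.
*/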
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid, bool locked,
bool swdev_notify)
{
struct net_bridge_fdb_entry *fdb;
bool modified = false;
int err = 0;
trace_br_fdb_external_learn_add(br, p, addr, vid);
if (locked && (!p || !(p->flags & BR_PORT_MAB)))
return -EINVAL;
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
if (!fdb) {
unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
if (swdev_notify)
flags |= BIT(BR_FDB_ADDED_BY_USER);
if (!p)
flags |= BIT(BR_FDB_LOCAL);
if (locked)
flags |= BIT(BR_FDB_LOCKED);
fdb = fdb_create(br, p, addr, vid, flags);
if (!fdb) {
err = -ENOMEM;
goto err_unlock;
}
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
if (locked &&
(!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
READ_ONCE(fdb->dst) != p)) {
err = -EINVAL;
goto err_unlock;
}
fdb->updated = jiffies;
if (READ_ONCE(fdb->dst) != p) {
WRITE_ONCE(fdb->dst, p);
modified = true;
}
if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
/* Take over SW learned entry */
set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
modified = true;
}
if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
change_bit(BR_FDB_LOCKED, &fdb->flags);
modified = true;
}
if (swdev_notify)
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
if (!p)
set_bit(BR_FDB_LOCAL, &fdb->flags);
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
err_unlock:
spin_unlock_bh(&br->hash_lock);
return err;
}
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid,
bool swdev_notify)
{
struct net_bridge_fdb_entry *fdb;
int err = 0;
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
fdb_delete(br, fdb, swdev_notify);
else
err = -ENOENT;
spin_unlock_bh(&br->hash_lock);
return err;
}
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid, bool offloaded)
{
struct net_bridge_fdb_entry *fdb;
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
change_bit(BR_FDB_OFFLOADED, &fdb->flags);
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
{
struct net_bridge_fdb_entry *f;
struct net_bridge_port *p;
ASSERT_RTNL();
p = br_port_get_rtnl(dev);
if (!p)
return;
spin_lock_bh(&p->br->hash_lock);
hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
if (f->dst == p && f->key.vlan_id == vid)
clear_bit(BR_FDB_OFFLOADED, &f->flags);
}
spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
| linux-master | net/bridge/br_fdb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Spanning tree protocol; BPDU handling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netfilter_bridge.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
#include <asm/unaligned.h>
#include "br_private.h"
#include "br_private_stp.h"
#define STP_HZ 256
#define LLC_RESERVE sizeof(struct llc_pdu_un)
static int br_send_bpdu_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
return dev_queue_xmit(skb);
}
static void br_send_bpdu(struct net_bridge_port *p,
const unsigned char *data, int length)
{
struct sk_buff *skb;
skb = dev_alloc_skb(length+LLC_RESERVE);
if (!skb)
return;
skb->dev = p->dev;
skb->protocol = htons(ETH_P_802_2);
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, LLC_RESERVE);
__skb_put_data(skb, data, length);
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
LLC_SAP_BSPAN, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
skb_reset_mac_header(skb);
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
dev_net(p->dev), NULL, skb, NULL, skb->dev,
br_send_bpdu_finish);
}
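/* BPDU timer fields are carried on the wire in units of 1/256th of a
* second (STP_HZ); these helpers convert between jiffies and that
* fixed-point representation.
*/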
static inline void br_set_ticks(unsigned char *dest, int j)
{
unsigned long ticks = (STP_HZ * j)/ HZ;
put_unaligned_be16(ticks, dest);
}
static inline int br_get_ticks(const unsigned char *src)
{
unsigned long ticks = get_unaligned_be16(src);
return DIV_ROUND_UP(ticks * HZ, STP_HZ);
}
/* called under bridge lock */
void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
{
unsigned char buf[35];
if (p->br->stp_enabled != BR_KERNEL_STP)
return;
buf[0] = 0;
buf[1] = 0;
buf[2] = 0;
buf[3] = BPDU_TYPE_CONFIG;
buf[4] = (bpdu->topology_change ? 0x01 : 0) |
(bpdu->topology_change_ack ? 0x80 : 0);
buf[5] = bpdu->root.prio[0];
buf[6] = bpdu->root.prio[1];
buf[7] = bpdu->root.addr[0];
buf[8] = bpdu->root.addr[1];
buf[9] = bpdu->root.addr[2];
buf[10] = bpdu->root.addr[3];
buf[11] = bpdu->root.addr[4];
buf[12] = bpdu->root.addr[5];
buf[13] = (bpdu->root_path_cost >> 24) & 0xFF;
buf[14] = (bpdu->root_path_cost >> 16) & 0xFF;
buf[15] = (bpdu->root_path_cost >> 8) & 0xFF;
buf[16] = bpdu->root_path_cost & 0xFF;
buf[17] = bpdu->bridge_id.prio[0];
buf[18] = bpdu->bridge_id.prio[1];
buf[19] = bpdu->bridge_id.addr[0];
buf[20] = bpdu->bridge_id.addr[1];
buf[21] = bpdu->bridge_id.addr[2];
buf[22] = bpdu->bridge_id.addr[3];
buf[23] = bpdu->bridge_id.addr[4];
buf[24] = bpdu->bridge_id.addr[5];
buf[25] = (bpdu->port_id >> 8) & 0xFF;
buf[26] = bpdu->port_id & 0xFF;
br_set_ticks(buf+27, bpdu->message_age);
br_set_ticks(buf+29, bpdu->max_age);
br_set_ticks(buf+31, bpdu->hello_time);
br_set_ticks(buf+33, bpdu->forward_delay);
br_send_bpdu(p, buf, 35);
p->stp_xstats.tx_bpdu++;
}
/* called under bridge lock */
void br_send_tcn_bpdu(struct net_bridge_port *p)
{
unsigned char buf[4];
if (p->br->stp_enabled != BR_KERNEL_STP)
return;
buf[0] = 0;
buf[1] = 0;
buf[2] = 0;
buf[3] = BPDU_TYPE_TCN;
br_send_bpdu(p, buf, 4);
p->stp_xstats.tx_tcn++;
}
/*
* Called from llc.
*
* NO locks, but rcu_read_lock
*/
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
if (!pskb_may_pull(skb, 4))
goto err;
/* check the protocol id and version */
buf = skb->data;
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
p = br_port_get_check_rcu(dev);
if (!p)
goto err;
br = p->br;
spin_lock(&br->lock);
if (br->stp_enabled != BR_KERNEL_STP)
goto out;
if (!(br->dev->flags & IFF_UP))
goto out;
if (p->state == BR_STATE_DISABLED)
goto out;
if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
goto out;
if (p->flags & BR_BPDU_GUARD) {
br_notice(br, "BPDU received on blocked port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
br_stp_disable_port(p);
goto out;
}
buf = skb_pull(skb, 3);
if (buf[0] == BPDU_TYPE_CONFIG) {
struct br_config_bpdu bpdu;
if (!pskb_may_pull(skb, 32))
goto out;
buf = skb->data;
bpdu.topology_change = (buf[1] & 0x01) ? 1 : 0;
bpdu.topology_change_ack = (buf[1] & 0x80) ? 1 : 0;
bpdu.root.prio[0] = buf[2];
bpdu.root.prio[1] = buf[3];
bpdu.root.addr[0] = buf[4];
bpdu.root.addr[1] = buf[5];
bpdu.root.addr[2] = buf[6];
bpdu.root.addr[3] = buf[7];
bpdu.root.addr[4] = buf[8];
bpdu.root.addr[5] = buf[9];
bpdu.root_path_cost =
(buf[10] << 24) |
(buf[11] << 16) |
(buf[12] << 8) |
buf[13];
bpdu.bridge_id.prio[0] = buf[14];
bpdu.bridge_id.prio[1] = buf[15];
bpdu.bridge_id.addr[0] = buf[16];
bpdu.bridge_id.addr[1] = buf[17];
bpdu.bridge_id.addr[2] = buf[18];
bpdu.bridge_id.addr[3] = buf[19];
bpdu.bridge_id.addr[4] = buf[20];
bpdu.bridge_id.addr[5] = buf[21];
bpdu.port_id = (buf[22] << 8) | buf[23];
bpdu.message_age = br_get_ticks(buf+24);
bpdu.max_age = br_get_ticks(buf+26);
bpdu.hello_time = br_get_ticks(buf+28);
bpdu.forward_delay = br_get_ticks(buf+30);
if (bpdu.message_age > bpdu.max_age) {
if (net_ratelimit())
br_notice(p->br,
"port %u config from %pM"
" (message_age %ul > max_age %ul)\n",
p->port_no,
eth_hdr(skb)->h_source,
bpdu.message_age, bpdu.max_age);
goto out;
}
br_received_config_bpdu(p, &bpdu);
} else if (buf[0] == BPDU_TYPE_TCN) {
br_received_tcn_bpdu(p);
}
out:
spin_unlock(&br->lock);
err:
kfree_skb(skb);
}
| linux-master | net/bridge/br_stp_bpdu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sysfs attributes of bridge ports
* Linux ethernet bridge
*
* Authors:
* Stephen Hemminger <[email protected]>
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/sched/signal.h>
#include "br_private.h"
/* IMPORTANT: new bridge port options must be added with netlink support only
* please do not add new sysfs entries
*/
struct brport_attribute {
struct attribute attr;
ssize_t (*show)(struct net_bridge_port *, char *);
int (*store)(struct net_bridge_port *, unsigned long);
int (*store_raw)(struct net_bridge_port *, char *);
};
#define BRPORT_ATTR_RAW(_name, _mode, _show, _store) \
const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.show = _show, \
.store_raw = _store, \
};
#define BRPORT_ATTR(_name, _mode, _show, _store) \
const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.show = _show, \
.store = _store, \
};
#define BRPORT_ATTR_FLAG(_name, _mask) \
static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
{ \
return sprintf(buf, "%d\n", !!(p->flags & _mask)); \
} \
static int store_##_name(struct net_bridge_port *p, unsigned long v) \
{ \
return store_flag(p, v, _mask); \
} \
static BRPORT_ATTR(_name, 0644, \
show_##_name, store_##_name)
static int store_flag(struct net_bridge_port *p, unsigned long v,
unsigned long mask)
{
struct netlink_ext_ack extack = {0};
unsigned long flags = p->flags;
int err;
if (v)
flags |= mask;
else
flags &= ~mask;
if (flags != p->flags) {
err = br_switchdev_set_port_flag(p, flags, mask, &extack);
if (err) {
netdev_err(p->dev, "%s\n", extack._msg);
return err;
}
p->flags = flags;
br_port_flags_change(p, mask);
}
return 0;
}
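/*
* For illustration, BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE) used
* further below expands to roughly:
*
*   static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
*   {
*           return sprintf(buf, "%d\n", !!(p->flags & BR_HAIRPIN_MODE));
*   }
*   static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
*   {
*           return store_flag(p, v, BR_HAIRPIN_MODE);
*   }
*   static BRPORT_ATTR(hairpin_mode, 0644, show_hairpin_mode, store_hairpin_mode)
*
* so every per-port flag becomes a 0644 sysfs attribute backed by
* store_flag(), which also notifies switchdev drivers of the change.
*/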
static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->path_cost);
}
static BRPORT_ATTR(path_cost, 0644,
show_path_cost, br_stp_set_path_cost);
static ssize_t show_priority(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->priority);
}
static BRPORT_ATTR(priority, 0644,
show_priority, br_stp_set_port_priority);
static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_root);
}
static BRPORT_ATTR(designated_root, 0444, show_designated_root, NULL);
static ssize_t show_designated_bridge(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_bridge);
}
static BRPORT_ATTR(designated_bridge, 0444, show_designated_bridge, NULL);
static ssize_t show_designated_port(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_port);
}
static BRPORT_ATTR(designated_port, 0444, show_designated_port, NULL);
static ssize_t show_designated_cost(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_cost);
}
static BRPORT_ATTR(designated_cost, 0444, show_designated_cost, NULL);
static ssize_t show_port_id(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_id);
}
static BRPORT_ATTR(port_id, 0444, show_port_id, NULL);
static ssize_t show_port_no(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_no);
}
static BRPORT_ATTR(port_no, 0444, show_port_no, NULL);
static ssize_t show_change_ack(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->topology_change_ack);
}
static BRPORT_ATTR(change_ack, 0444, show_change_ack, NULL);
static ssize_t show_config_pending(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->config_pending);
}
static BRPORT_ATTR(config_pending, 0444, show_config_pending, NULL);
static ssize_t show_port_state(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->state);
}
static BRPORT_ATTR(state, 0444, show_port_state, NULL);
static ssize_t show_message_age_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->message_age_timer));
}
static BRPORT_ATTR(message_age_timer, 0444, show_message_age_timer, NULL);
static ssize_t show_forward_delay_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->forward_delay_timer));
}
static BRPORT_ATTR(forward_delay_timer, 0444, show_forward_delay_timer, NULL);
static ssize_t show_hold_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->hold_timer));
}
static BRPORT_ATTR(hold_timer, 0444, show_hold_timer, NULL);
static int store_flush(struct net_bridge_port *p, unsigned long v)
{
br_fdb_delete_by_port(p->br, p, 0, 0); // Don't delete local entry
return 0;
}
static BRPORT_ATTR(flush, 0200, NULL, store_flush);
static ssize_t show_group_fwd_mask(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%#x\n", p->group_fwd_mask);
}
static int store_group_fwd_mask(struct net_bridge_port *p,
unsigned long v)
{
if (v & BR_GROUPFWD_MACPAUSE)
return -EINVAL;
p->group_fwd_mask = v;
return 0;
}
static BRPORT_ATTR(group_fwd_mask, 0644, show_group_fwd_mask,
store_group_fwd_mask);
static ssize_t show_backup_port(struct net_bridge_port *p, char *buf)
{
struct net_bridge_port *backup_p;
int ret = 0;
rcu_read_lock();
backup_p = rcu_dereference(p->backup_port);
if (backup_p)
ret = sprintf(buf, "%s\n", backup_p->dev->name);
rcu_read_unlock();
return ret;
}
static int store_backup_port(struct net_bridge_port *p, char *buf)
{
struct net_device *backup_dev = NULL;
char *nl = strchr(buf, '\n');
if (nl)
*nl = '\0';
if (strlen(buf) > 0) {
backup_dev = __dev_get_by_name(dev_net(p->dev), buf);
if (!backup_dev)
return -ENOENT;
}
return nbp_backup_change(p, backup_dev);
}
static BRPORT_ATTR_RAW(backup_port, 0644, show_backup_port, store_backup_port);
BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
BRPORT_ATTR_FLAG(multicast_flood, BR_MCAST_FLOOD);
BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD);
BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->multicast_ctx.multicast_router);
}
static int store_multicast_router(struct net_bridge_port *p,
unsigned long v)
{
return br_multicast_set_port_router(&p->multicast_ctx, v);
}
static BRPORT_ATTR(multicast_router, 0644, show_multicast_router,
store_multicast_router);
BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UNICAST);
#endif
static const struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
&brport_attr_port_id,
&brport_attr_port_no,
&brport_attr_designated_root,
&brport_attr_designated_bridge,
&brport_attr_designated_port,
&brport_attr_designated_cost,
&brport_attr_state,
&brport_attr_change_ack,
&brport_attr_config_pending,
&brport_attr_message_age_timer,
&brport_attr_forward_delay_timer,
&brport_attr_hold_timer,
&brport_attr_flush,
&brport_attr_hairpin_mode,
&brport_attr_bpdu_guard,
&brport_attr_root_block,
&brport_attr_learning,
&brport_attr_unicast_flood,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
&brport_attr_multicast_to_unicast,
#endif
&brport_attr_proxyarp,
&brport_attr_proxyarp_wifi,
&brport_attr_multicast_flood,
&brport_attr_broadcast_flood,
&brport_attr_group_fwd_mask,
&brport_attr_neigh_suppress,
&brport_attr_isolated,
&brport_attr_backup_port,
NULL
};
#define to_brport_attr(_at) container_of(_at, struct brport_attribute, attr)
static ssize_t brport_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = kobj_to_brport(kobj);
if (!brport_attr->show)
return -EINVAL;
return brport_attr->show(p, buf);
}
static ssize_t brport_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t count)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = kobj_to_brport(kobj);
ssize_t ret = -EINVAL;
unsigned long val;
char *endp;
if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
if (brport_attr->store_raw) {
char *buf_copy;
buf_copy = kstrndup(buf, count, GFP_KERNEL);
if (!buf_copy) {
ret = -ENOMEM;
goto out_unlock;
}
spin_lock_bh(&p->br->lock);
ret = brport_attr->store_raw(p, buf_copy);
spin_unlock_bh(&p->br->lock);
kfree(buf_copy);
} else if (brport_attr->store) {
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
goto out_unlock;
spin_lock_bh(&p->br->lock);
ret = brport_attr->store(p, val);
spin_unlock_bh(&p->br->lock);
}
if (!ret) {
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
ret = count;
}
out_unlock:
rtnl_unlock();
return ret;
}
const struct sysfs_ops brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
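/*
* These attributes appear under /sys/class/net/<port>/brport/.  A usage
* sketch (the device name is hypothetical):
*
*   cat /sys/class/net/eth0/brport/state
*   echo 1 > /sys/class/net/eth0/brport/hairpin_mode
*
* Writes require CAP_NET_ADMIN (checked against the port's network namespace)
* and go through brport_store() above, which takes RTNL and the bridge lock.
*/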
/*
* Add sysfs entries to an ethernet device added to a bridge.
* Creates a brport subdirectory with the bridge port attributes.
* Puts a symlink in the bridge's brif subdirectory
*/
int br_sysfs_addif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
const struct brport_attribute **a;
int err;
err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
SYSFS_BRIDGE_PORT_LINK);
if (err)
return err;
for (a = brport_attrs; *a; ++a) {
err = sysfs_create_file(&p->kobj, &((*a)->attr));
if (err)
return err;
}
strscpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name);
}
/* Rename bridge's brif symlink */
int br_sysfs_renameif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
int err;
/* If a rename fails, the rollback will cause another
* rename call with the existing name.
*/
if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ))
return 0;
err = sysfs_rename_link(br->ifobj, &p->kobj,
p->sysfs_name, p->dev->name);
if (err)
netdev_notice(br->dev, "unable to rename link %s to %s",
p->sysfs_name, p->dev->name);
else
strscpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
return err;
}
| linux-master | net/bridge/br_sysfs_if.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle incoming frames
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
#include <net/netfilter/nf_queue.h>
#endif
#include <linux/neighbour.h>
#include <net/arp.h>
#include <net/dsa.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
br_drop_fake_rtable(skb);
return netif_receive_skb(skb);
}
static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct net_bridge_vlan_group *vg;
dev_sw_netstats_rx_add(brdev, skb->len);
vg = br_vlan_group_rcu(br);
/* Reset the offload_fwd_mark because there could be a stacked
* bridge above, and it should not think this bridge is doing
* that bridge's work forwarding out its ports.
*/
br_switchdev_frame_unmark(skb);
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc mode when someone
* may be running packet capture.
*/
if (!(brdev->flags & IFF_PROMISC) &&
!br_allowed_egress(vg, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
indev = skb->dev;
skb->dev = brdev;
skb = br_handle_vlan(br, NULL, vg, skb);
if (!skb)
return NET_RX_DROP;
/* update the multicast stats if the packet is IGMP/MLD */
br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
BR_MCAST_DIR_TX);
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
}
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
enum br_pkt_type pkt_type = BR_PKT_UNICAST;
struct net_bridge_fdb_entry *dst = NULL;
struct net_bridge_mcast_port *pmctx;
struct net_bridge_mdb_entry *mdst;
bool local_rcv, mcast_hit = false;
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
u16 vid = 0;
u8 state;
if (!p)
goto drop;
br = p->br;
if (br_mst_is_enabled(br)) {
state = BR_STATE_FORWARDING;
} else {
if (p->state == BR_STATE_DISABLED)
goto drop;
state = p->state;
}
brmctx = &p->br->multicast_ctx;
pmctx = &p->multicast_ctx;
if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
&state, &vlan))
goto out;
if (p->flags & BR_PORT_LOCKED) {
struct net_bridge_fdb_entry *fdb_src =
br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid);
if (!fdb_src) {
/* FDB miss. Create locked FDB entry if MAB is enabled
* and drop the packet.
*/
if (p->flags & BR_PORT_MAB)
br_fdb_update(br, p, eth_hdr(skb)->h_source,
vid, BIT(BR_FDB_LOCKED));
goto drop;
} else if (READ_ONCE(fdb_src->dst) != p ||
test_bit(BR_FDB_LOCAL, &fdb_src->flags)) {
/* FDB mismatch. Drop the packet without roaming. */
goto drop;
} else if (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) {
/* FDB match, but entry is locked. Refresh it and drop
* the packet.
*/
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
BIT(BR_FDB_LOCKED));
goto drop;
}
}
nbp_switchdev_frame_mark(p, skb);
/* insert into forwarding database after filtering to avoid spoofing */
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
local_rcv = !!(br->dev->flags & IFF_PROMISC);
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
/* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
pkt_type = BR_PKT_BROADCAST;
local_rcv = true;
} else {
pkt_type = BR_PKT_MULTICAST;
if (br_multicast_rcv(&brmctx, &pmctx, vlan, skb, vid))
goto drop;
}
}
if (state == BR_STATE_LEARNING)
goto drop;
BR_INPUT_SKB_CB(skb)->brdev = br->dev;
BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);
if (IS_ENABLED(CONFIG_INET) &&
(skb->protocol == htons(ETH_P_ARP) ||
skb->protocol == htons(ETH_P_RARP))) {
br_do_proxy_suppress_arp(skb, br, vid, p);
} else if (IS_ENABLED(CONFIG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6) &&
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
struct nd_msg *msg, _msg;
msg = br_is_nd_neigh_msg(skb, &_msg);
if (msg)
br_do_suppress_nd(skb, br, vid, p, msg);
}
switch (pkt_type) {
case BR_PKT_MULTICAST:
mdst = br_mdb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
if ((mdst && mdst->host_joined) ||
br_multicast_is_router(brmctx, skb)) {
local_rcv = true;
DEV_STATS_INC(br->dev, multicast);
}
mcast_hit = true;
} else {
local_rcv = true;
DEV_STATS_INC(br->dev, multicast);
}
break;
case BR_PKT_UNICAST:
dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
break;
default:
break;
}
if (dst) {
unsigned long now = jiffies;
if (test_bit(BR_FDB_LOCAL, &dst->flags))
return br_pass_frame_up(skb);
if (now != dst->used)
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false, vid);
else
br_multicast_flood(mdst, skb, brmctx, local_rcv, false);
}
if (local_rcv)
return br_pass_frame_up(skb);
out:
return 0;
drop:
kfree_skb(skb);
goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
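/*
* Rough flow of br_handle_frame_finish() above: validate the ingress port and
* VLAN, optionally enforce BR_PORT_LOCKED/MAB, learn the source MAC, classify
* the packet (unicast/multicast/broadcast), apply ARP/ND suppression, then
* either deliver locally via br_pass_frame_up(), forward to a known FDB/MDB
* destination, or flood.
*/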
static void __br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
/* check if vlan is allowed, to avoid spoofing */
if ((p->flags & BR_LEARNING) &&
nbp_state_should_learn(p) &&
!br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
__br_handle_local_finish(skb);
/* return 1 to signal the okfn() was called so it's ok to use the skb */
return 1;
}
static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries *e = NULL;
struct nf_hook_state state;
unsigned int verdict, i;
struct net *net;
int ret;
net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
goto frame_finish;
#endif
e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
if (!e)
goto frame_finish;
nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
NFPROTO_BRIDGE, skb->dev, NULL, NULL,
net, br_handle_frame_finish);
for (i = 0; i < e->num_hook_entries; i++) {
verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
*pskb = skb;
return RX_HANDLER_PASS;
}
break;
case NF_DROP:
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
case NF_QUEUE:
ret = nf_queue(skb, &state, i, verdict);
if (ret == 1)
continue;
return RX_HANDLER_CONSUMED;
default: /* STOLEN */
return RX_HANDLER_CONSUMED;
}
}
frame_finish:
net = dev_net(skb->dev);
br_handle_frame_finish(net, NULL, skb);
#else
br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
return RX_HANDLER_CONSUMED;
}
/* Return 0 if the frame was not processed otherwise 1
* note: already called with rcu_read_lock
*/
static int br_process_frame_type(struct net_bridge_port *p,
struct sk_buff *skb)
{
struct br_frame_type *tmp;
hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
if (unlikely(tmp->type == skb->protocol))
return tmp->frame_handler(p, skb);
return 0;
}
/*
* Return NULL if skb is handled
* note: already called with rcu_read_lock
*/
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
struct net_bridge_port *p;
struct sk_buff *skb = *pskb;
const unsigned char *dest = eth_hdr(skb)->h_dest;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
goto drop;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return RX_HANDLER_CONSUMED;
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
br_tc_skb_miss_set(skb, false);
p = br_port_get_rcu(skb->dev);
if (p->flags & BR_VLAN_TUNNEL)
br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));
if (unlikely(is_link_local_ether_addr(dest))) {
u16 fwd_mask = p->br->group_fwd_mask_required;
/*
* See IEEE 802.1D Table 7-10 Reserved addresses
*
* Assignment Value
* Bridge Group Address 01-80-C2-00-00-00
* (MAC Control) 802.3 01-80-C2-00-00-01
* (Link Aggregation) 802.3 01-80-C2-00-00-02
* 802.1X PAE address 01-80-C2-00-00-03
*
* 802.1AB LLDP 01-80-C2-00-00-0E
*
* Others reserved for future standardization
*/
fwd_mask |= p->group_fwd_mask;
switch (dest[5]) {
case 0x00: /* Bridge Group Address */
/* If STP is turned off,
then we must forward to keep loop detection working */
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
*pskb = skb;
__br_handle_local_finish(skb);
return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
case 0x0E: /* 802.1AB LLDP */
fwd_mask |= p->br->group_fwd_mask;
if (fwd_mask & (1u << dest[5]))
goto forward;
*pskb = skb;
__br_handle_local_finish(skb);
return RX_HANDLER_PASS;
default:
/* Allow selective forwarding for most other protocols */
fwd_mask |= p->br->group_fwd_mask;
if (fwd_mask & (1u << dest[5]))
goto forward;
}
/* The else clause should be hit when nf_hook():
* - returns < 0 (drop/error)
* - returns = 0 (stolen/nf_queue)
* Thus return 1 from the okfn() to signal the skb is ok to pass
*/
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(skb->dev), NULL, skb, skb->dev, NULL,
br_handle_local_finish) == 1) {
return RX_HANDLER_PASS;
} else {
return RX_HANDLER_CONSUMED;
}
}
if (unlikely(br_process_frame_type(p, skb)))
return RX_HANDLER_PASS;
forward:
if (br_mst_is_enabled(p->br))
goto defer_stp_filtering;
switch (p->state) {
case BR_STATE_FORWARDING:
case BR_STATE_LEARNING:
defer_stp_filtering:
if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
return nf_hook_bridge_pre(skb, pskb);
default:
drop:
kfree_skb(skb);
}
return RX_HANDLER_CONSUMED;
}
/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
* helpers which identify bridged ports according to the rx_handler installed
* on them (so there _needs_ to be a bridge rx_handler even if we don't need it
* to do anything useful). This bridge won't support traffic to/from the stack,
* but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
* frames from the ETH_P_XDSA packet_type handler.
*/
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
return RX_HANDLER_PASS;
}
rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
{
if (netdev_uses_dsa(dev))
return br_handle_frame_dummy;
return br_handle_frame;
}
void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
{
hlist_add_head_rcu(&ft->list, &br->frame_type_list);
}
void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
{
struct br_frame_type *tmp;
hlist_for_each_entry(tmp, &br->frame_type_list, list)
if (ft == tmp) {
hlist_del_rcu(&ft->list);
return;
}
}
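/*
* A minimal sketch of how a caller would register a custom frame type (field
* names follow struct br_frame_type as used above; the handler and EtherType
* are hypothetical):
*
*   static int my_handler(struct net_bridge_port *p, struct sk_buff *skb);
*
*   static struct br_frame_type my_ft = {
*           .type          = htons(0x1234),
*           .frame_handler = my_handler,
*   };
*   br_add_frame(br, &my_ft);
*
* The handler returns nonzero if it processed the frame, in which case
* br_handle_frame() returns RX_HANDLER_PASS instead of bridging it.
*/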
| linux-master | net/bridge/br_input.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Device handling code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>
#include <linux/uaccess.h>
#include "br_private.h"
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge_mcast_port *pmctx_null = NULL;
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mcast *brmctx = &br->multicast_ctx;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
const struct nf_br_ops *nf_ops;
u8 state = BR_STATE_FORWARDING;
struct net_bridge_vlan *vlan;
const unsigned char *dest;
u16 vid = 0;
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
br_tc_skb_miss_set(skb, false);
rcu_read_lock();
nf_ops = rcu_dereference(nf_br_ops);
if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
rcu_read_unlock();
return NETDEV_TX_OK;
}
dev_sw_netstats_tx_add(dev, 1, skb->len);
br_switchdev_frame_unmark(skb);
BR_INPUT_SKB_CB(skb)->brdev = dev;
BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
&state, &vlan))
goto out;
if (IS_ENABLED(CONFIG_INET) &&
(eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
br_do_proxy_suppress_arp(skb, br, vid, NULL);
} else if (IS_ENABLED(CONFIG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6) &&
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
struct nd_msg *msg, _msg;
msg = br_is_nd_neigh_msg(skb, &_msg);
if (msg)
br_do_suppress_nd(skb, br, vid, NULL, msg);
}
dest = eth_hdr(skb)->h_dest;
if (is_broadcast_ether_addr(dest)) {
br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
} else if (is_multicast_ether_addr(dest)) {
if (unlikely(netpoll_tx_running(dev))) {
br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
goto out;
}
if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
kfree_skb(skb);
goto out;
}
mdst = br_mdb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
br_multicast_flood(mdst, skb, brmctx, false, true);
else
br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
br_forward(dst->dst, skb, false, true);
} else {
br_flood(br, skb, BR_PKT_UNICAST, false, true, vid);
}
out:
rcu_read_unlock();
return NETDEV_TX_OK;
}
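/*
* In short, br_dev_xmit() mirrors the receive path for locally originated
* frames: VLAN filtering, optional ARP/ND suppression, then forwarding to a
* known FDB entry, multicast delivery, or flooding, all under rcu_read_lock.
*/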
static struct lock_class_key bridge_netdev_addr_lock_key;
static void br_set_lockdep_class(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
err = br_fdb_hash_init(br);
if (err) {
free_percpu(dev->tstats);
return err;
}
err = br_mdb_hash_init(br);
if (err) {
free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
}
err = br_multicast_init_stats(br);
if (err) {
free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
}
br_set_lockdep_class(dev);
return err;
}
static void br_dev_uninit(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
br_multicast_dev_del(br);
br_multicast_uninit_stats(br);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
netdev_update_features(dev);
netif_start_queue(dev);
br_stp_enable_bridge(br);
br_multicast_open(br);
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_join_snoopers(br);
return 0;
}
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
if (change & IFF_PROMISC)
br_manage_promisc(netdev_priv(dev));
}
static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
br_stp_disable_bridge(br);
br_multicast_stop(br);
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_leave_snoopers(br);
netif_stop_queue(dev);
return 0;
}
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
dev->mtu = new_mtu;
/* this flag will be cleared if the MTU was automatically adjusted */
br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* remember the MTU in the rtable for PMTU */
dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif
return 0;
}
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
struct net_bridge *br = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* dev_set_mac_addr() can be called by a master device on bridge's
* NETDEV_UNREGISTER, but since it's being destroyed do nothing
*/
if (dev->reg_state != NETREG_REGISTERED)
return -EBUSY;
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
/* Mac address will be changed in br_stp_change_bridge_id(). */
br_stp_change_bridge_id(br, addr->sa_data);
}
spin_unlock_bh(&br->lock);
return 0;
}
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strscpy(info->driver, "bridge", sizeof(info->driver));
strscpy(info->version, BR_VERSION, sizeof(info->version));
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int br_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
list_for_each_entry(p, &br->port_list, list) {
struct ethtool_link_ksettings ecmd;
struct net_device *pdev = p->dev;
if (!netif_running(pdev) || !netif_oper_up(pdev))
continue;
if (__ethtool_get_link_ksettings(pdev, &ecmd))
continue;
if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
continue;
if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
cmd->base.speed < ecmd.base.speed)
cmd->base.speed = ecmd.base.speed;
}
return 0;
}
static netdev_features_t br_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct net_bridge *br = netdev_priv(dev);
return br_features_recompute(br, features);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}
static void br_netpoll_cleanup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list)
br_netpoll_disable(p);
}
static int __br_netpoll_enable(struct net_bridge_port *p)
{
struct netpoll *np;
int err;
np = kzalloc(sizeof(*p->np), GFP_KERNEL);
if (!np)
return -ENOMEM;
err = __netpoll_setup(np, p->dev);
if (err) {
kfree(np);
return err;
}
p->np = np;
return err;
}
int br_netpoll_enable(struct net_bridge_port *p)
{
if (!p->br->dev->npinfo)
return 0;
return __br_netpoll_enable(p);
}
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
int err = 0;
list_for_each_entry(p, &br->port_list, list) {
if (!p->dev)
continue;
err = __br_netpoll_enable(p);
if (err)
goto fail;
}
out:
return err;
fail:
br_netpoll_cleanup(dev);
goto out;
}
void br_netpoll_disable(struct net_bridge_port *p)
{
struct netpoll *np = p->np;
if (!np)
return;
p->np = NULL;
__netpoll_free(np);
}
#endif
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(dev);
return br_add_if(br, slave_dev, extack);
}
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
struct net_bridge *br = netdev_priv(dev);
return br_del_if(br, slave_dev);
}
static int br_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct net_bridge_fdb_entry *f;
struct net_bridge_port *dst;
struct net_bridge *br;
if (netif_is_bridge_port(ctx->dev))
return -1;
br = netdev_priv(ctx->dev);
br_vlan_fill_forward_path_pvid(br, ctx, path);
f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
if (!f || !f->dst)
return -1;
dst = READ_ONCE(f->dst);
if (!dst)
return -1;
if (br_vlan_fill_forward_path_mode(br, dst, path))
return -1;
path->type = DEV_PATH_BRIDGE;
path->dev = dst->br->dev;
ctx->dev = dst->dev;
switch (path->bridge.vlan_mode) {
case DEV_PATH_BR_VLAN_TAG:
if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
return -ENOSPC;
ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
ctx->num_vlans++;
break;
case DEV_PATH_BR_VLAN_UNTAG_HW:
case DEV_PATH_BR_VLAN_UNTAG:
ctx->num_vlans--;
break;
case DEV_PATH_BR_VLAN_KEEP:
break;
}
return 0;
}
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = br_get_link_ksettings,
};
static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_init = br_dev_init,
.ndo_uninit = br_dev_uninit,
.ndo_start_xmit = br_dev_xmit,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_rx_flags = br_dev_change_rx_flags,
.ndo_change_mtu = br_change_mtu,
.ndo_siocdevprivate = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = br_netpoll_setup,
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
.ndo_add_slave = br_add_slave,
.ndo_del_slave = br_del_slave,
.ndo_fix_features = br_fix_features,
.ndo_fdb_add = br_fdb_add,
.ndo_fdb_del = br_fdb_delete,
.ndo_fdb_del_bulk = br_fdb_delete_bulk,
.ndo_fdb_dump = br_fdb_dump,
.ndo_fdb_get = br_fdb_get,
.ndo_mdb_add = br_mdb_add,
.ndo_mdb_del = br_mdb_del,
.ndo_mdb_dump = br_mdb_dump,
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
.ndo_features_check = passthru_features_check,
.ndo_fill_forward_path = br_fill_forward_path,
};
static struct device_type br_type = {
.name = "bridge",
};
void br_dev_setup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
eth_hw_addr_random(dev);
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
dev->needs_free_netdev = true;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
br->dev = dev;
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
INIT_HLIST_HEAD(&br->fdb_list);
INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
INIT_HLIST_HEAD(&br->mep_list);
#endif
spin_lock_init(&br->hash_lock);
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
ether_addr_copy(br->group_addr, eth_stp_addr);
br->stp_enabled = BR_NO_STP;
br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
br->designated_root = br->bridge_id;
br->bridge_max_age = br->max_age = 20 * HZ;
br->bridge_hello_time = br->hello_time = 2 * HZ;
br->bridge_forward_delay = br->forward_delay = 15 * HZ;
br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
dev->max_mtu = ETH_MAX_MTU;
br_netfilter_rtable_init(br);
br_stp_timer_init(br);
br_multicast_init(br);
INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}
| linux-master | net/bridge/br_device.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle bridge arp/nd proxy/suppress
*
* Copyright (C) 2017 Cumulus Networks
* Copyright (c) 2017 Roopa Prabhu <[email protected]>
*
* Authors:
* Roopa Prabhu <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/neighbour.h>
#include <net/arp.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6_stubs.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
#include "br_private.h"
void br_recalculate_neigh_suppress_enabled(struct net_bridge *br)
{
struct net_bridge_port *p;
bool neigh_suppress = false;
list_for_each_entry(p, &br->port_list, list) {
if (p->flags & (BR_NEIGH_SUPPRESS | BR_NEIGH_VLAN_SUPPRESS)) {
neigh_suppress = true;
break;
}
}
br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, neigh_suppress);
}
#if IS_ENABLED(CONFIG_INET)
static void br_arp_send(struct net_bridge *br, struct net_bridge_port *p,
struct net_device *dev, __be32 dest_ip, __be32 src_ip,
const unsigned char *dest_hw,
const unsigned char *src_hw,
const unsigned char *target_hw,
__be16 vlan_proto, u16 vlan_tci)
{
struct net_bridge_vlan_group *vg;
struct sk_buff *skb;
u16 pvid;
netdev_dbg(dev, "arp send dev %s dst %pI4 dst_hw %pM src %pI4 src_hw %pM\n",
dev->name, &dest_ip, dest_hw, &src_ip, src_hw);
if (!vlan_tci) {
arp_send(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip,
dest_hw, src_hw, target_hw);
return;
}
skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip,
dest_hw, src_hw, target_hw);
if (!skb)
return;
if (p)
vg = nbp_vlan_group_rcu(p);
else
vg = br_vlan_group_rcu(br);
pvid = br_get_pvid(vg);
if (pvid == (vlan_tci & VLAN_VID_MASK))
vlan_tci = 0;
if (vlan_tci)
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
if (p) {
arp_xmit(skb);
} else {
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_HOST;
netif_rx(skb);
}
}
static int br_chk_addr_ip(struct net_device *dev,
struct netdev_nested_priv *priv)
{
__be32 ip = *(__be32 *)priv->data;
struct in_device *in_dev;
__be32 addr = 0;
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
addr = inet_confirm_addr(dev_net(dev), in_dev, 0, ip,
RT_SCOPE_HOST);
if (addr == ip)
return 1;
return 0;
}
static bool br_is_local_ip(struct net_device *dev, __be32 ip)
{
struct netdev_nested_priv priv = {
.data = (void *)&ip,
};
if (br_chk_addr_ip(dev, &priv))
return true;
/* check if ip is configured on upper dev */
if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip, &priv))
return true;
return false;
}
void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p)
{
struct net_device *dev = br->dev;
struct net_device *vlandev = dev;
struct neighbour *n;
struct arphdr *parp;
u8 *arpptr, *sha;
__be32 sip, tip;
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
if ((dev->flags & IFF_NOARP) ||
!pskb_may_pull(skb, arp_hdr_len(dev)))
return;
parp = arp_hdr(skb);
if (parp->ar_pro != htons(ETH_P_IP) ||
parp->ar_hln != dev->addr_len ||
parp->ar_pln != 4)
return;
arpptr = (u8 *)parp + sizeof(struct arphdr);
sha = arpptr;
arpptr += dev->addr_len; /* sha */
memcpy(&sip, arpptr, sizeof(sip));
arpptr += sizeof(sip);
arpptr += dev->addr_len; /* tha */
memcpy(&tip, arpptr, sizeof(tip));
if (ipv4_is_loopback(tip) ||
ipv4_is_multicast(tip))
return;
if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
if (br_is_neigh_suppress_enabled(p, vid))
return;
if (parp->ar_op != htons(ARPOP_RREQUEST) &&
parp->ar_op != htons(ARPOP_RREPLY) &&
(ipv4_is_zeronet(sip) || sip == tip)) {
/* prevent flooding to neigh suppress ports */
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
return;
}
}
if (parp->ar_op != htons(ARPOP_REQUEST))
return;
if (vid != 0) {
vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto,
vid);
if (!vlandev)
return;
}
if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
br_is_local_ip(vlandev, tip)) {
/* it's our local IP, so don't proxy reply
* and don't forward to neigh suppress ports
*/
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
return;
}
n = neigh_lookup(&arp_tbl, &tip, vlandev);
if (n) {
struct net_bridge_fdb_entry *f;
if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
neigh_release(n);
return;
}
f = br_fdb_find_rcu(br, n->ha, vid);
if (f) {
bool replied = false;
if ((p && (p->flags & BR_PROXYARP)) ||
(f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) ||
br_is_neigh_suppress_enabled(f->dst, vid)) {
if (!vid)
br_arp_send(br, p, skb->dev, sip, tip,
sha, n->ha, sha, 0, 0);
else
br_arp_send(br, p, skb->dev, sip, tip,
sha, n->ha, sha,
skb->vlan_proto,
skb_vlan_tag_get(skb));
replied = true;
}
/* If we have replied, or as long as we know the
* MAC, mark the ARP as replied
*/
if (replied ||
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
}
neigh_release(n);
}
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *msg)
{
struct nd_msg *m;
m = skb_header_pointer(skb, skb_network_offset(skb) +
sizeof(struct ipv6hdr), sizeof(*msg), msg);
if (!m)
return NULL;
if (m->icmph.icmp6_code != 0 ||
(m->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
m->icmph.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
return NULL;
return m;
}
static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
struct sk_buff *request, struct neighbour *n,
__be16 vlan_proto, u16 vlan_tci, struct nd_msg *ns)
{
struct net_device *dev = request->dev;
struct net_bridge_vlan_group *vg;
struct sk_buff *reply;
struct nd_msg *na;
struct ipv6hdr *pip6;
int na_olen = 8; /* opt hdr + ETH_ALEN for target */
int ns_olen;
int i, len;
u8 *daddr;
u16 pvid;
if (!dev)
return;
len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
sizeof(*na) + na_olen + dev->needed_tailroom;
reply = alloc_skb(len, GFP_ATOMIC);
if (!reply)
return;
reply->protocol = htons(ETH_P_IPV6);
reply->dev = dev;
skb_reserve(reply, LL_RESERVED_SPACE(dev));
skb_push(reply, sizeof(struct ethhdr));
skb_set_mac_header(reply, 0);
daddr = eth_hdr(request)->h_source;
/* Do we need option processing? */
ns_olen = request->len - (skb_network_offset(request) +
sizeof(struct ipv6hdr)) - sizeof(*ns);
for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) {
if (!ns->opt[i + 1]) {
kfree_skb(reply);
return;
}
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
}
}
/* Ethernet header */
ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
reply->protocol = htons(ETH_P_IPV6);
skb_pull(reply, sizeof(struct ethhdr));
skb_set_network_header(reply, 0);
skb_put(reply, sizeof(struct ipv6hdr));
/* IPv6 header */
pip6 = ipv6_hdr(reply);
memset(pip6, 0, sizeof(struct ipv6hdr));
pip6->version = 6;
pip6->priority = ipv6_hdr(request)->priority;
pip6->nexthdr = IPPROTO_ICMPV6;
pip6->hop_limit = 255;
pip6->daddr = ipv6_hdr(request)->saddr;
pip6->saddr = *(struct in6_addr *)n->primary_key;
skb_pull(reply, sizeof(struct ipv6hdr));
skb_set_transport_header(reply, 0);
na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
/* Neighbor Advertisement */
memset(na, 0, sizeof(*na) + na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 1 : 0;
na->icmph.icmp6_override = 1;
na->icmph.icmp6_solicited = 1;
na->target = ns->target;
ether_addr_copy(&na->opt[2], n->ha);
na->opt[0] = ND_OPT_TARGET_LL_ADDR;
na->opt[1] = na_olen >> 3;
na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
&pip6->daddr,
sizeof(*na) + na_olen,
IPPROTO_ICMPV6,
csum_partial(na, sizeof(*na) + na_olen, 0));
pip6->payload_len = htons(sizeof(*na) + na_olen);
skb_push(reply, sizeof(struct ipv6hdr));
skb_push(reply, sizeof(struct ethhdr));
reply->ip_summed = CHECKSUM_UNNECESSARY;
if (p)
vg = nbp_vlan_group_rcu(p);
else
vg = br_vlan_group_rcu(br);
pvid = br_get_pvid(vg);
if (pvid == (vlan_tci & VLAN_VID_MASK))
vlan_tci = 0;
if (vlan_tci)
__vlan_hwaccel_put_tag(reply, vlan_proto, vlan_tci);
netdev_dbg(dev, "nd send dev %s dst %pI6 dst_hw %pM src %pI6 src_hw %pM\n",
dev->name, &pip6->daddr, daddr, &pip6->saddr, n->ha);
if (p) {
dev_queue_xmit(reply);
} else {
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
netif_rx(reply);
}
}
static int br_chk_addr_ip6(struct net_device *dev,
struct netdev_nested_priv *priv)
{
struct in6_addr *addr = (struct in6_addr *)priv->data;
if (ipv6_chk_addr(dev_net(dev), addr, dev, 0))
return 1;
return 0;
}
static bool br_is_local_ip6(struct net_device *dev, struct in6_addr *addr)
{
struct netdev_nested_priv priv = {
.data = (void *)addr,
};
if (br_chk_addr_ip6(dev, &priv))
return true;
/* check if ip is configured on upper dev */
if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip6, &priv))
return true;
return false;
}
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg)
{
struct net_device *dev = br->dev;
struct net_device *vlandev = NULL;
struct in6_addr *saddr, *daddr;
struct ipv6hdr *iphdr;
struct neighbour *n;
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
if (br_is_neigh_suppress_enabled(p, vid))
return;
if (msg->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
!msg->icmph.icmp6_solicited) {
/* prevent flooding to neigh suppress ports */
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
return;
}
if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
return;
iphdr = ipv6_hdr(skb);
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
if (ipv6_addr_any(saddr) || !ipv6_addr_cmp(saddr, daddr)) {
/* prevent flooding to neigh suppress ports */
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
return;
}
if (vid != 0) {
/* build neigh table lookup on the vlan device */
vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto,
vid);
if (!vlandev)
return;
} else {
vlandev = dev;
}
if (br_is_local_ip6(vlandev, &msg->target)) {
/* it's our own IP, so don't proxy reply
* and don't forward to neigh suppress ports
*/
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
return;
}
n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, vlandev);
if (n) {
struct net_bridge_fdb_entry *f;
if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
neigh_release(n);
return;
}
f = br_fdb_find_rcu(br, n->ha, vid);
if (f) {
bool replied = false;
if (br_is_neigh_suppress_enabled(f->dst, vid)) {
if (vid != 0)
br_nd_send(br, p, skb, n,
skb->vlan_proto,
skb_vlan_tag_get(skb), msg);
else
br_nd_send(br, p, skb, n, 0, 0, msg);
replied = true;
}
/* If we have replied or as long as we know the
* mac, indicate to NEIGH_SUPPRESS ports that we
* have replied
*/
if (replied ||
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
}
neigh_release(n);
}
}
#endif
bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid)
{
if (!p)
return false;
if (!vid)
return !!(p->flags & BR_NEIGH_SUPPRESS);
if (p->flags & BR_NEIGH_VLAN_SUPPRESS) {
struct net_bridge_vlan_group *vg = nbp_vlan_group_rcu(p);
struct net_bridge_vlan *v;
v = br_vlan_find(vg, vid);
if (!v)
return false;
return !!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED);
} else {
return !!(p->flags & BR_NEIGH_SUPPRESS);
}
}
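/*
* i.e. with BR_NEIGH_VLAN_SUPPRESS the decision is made per VLAN via
* BR_VLFLAG_NEIGH_SUPPRESS_ENABLED, otherwise the port-wide BR_NEIGH_SUPPRESS
* flag applies to all VLANs.
*/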
| linux-master | net/bridge/br_arp_nd_proxy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Userspace interface
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>
#include "br_private.h"
/*
* Determine the initial path cost based on speed,
* using recommendations from the 802.1D standard.
*
* Since the driver might sleep, we must not be holding any locks.
*/
static int port_cost(struct net_device *dev)
{
struct ethtool_link_ksettings ecmd;
if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
switch (ecmd.base.speed) {
case SPEED_10000:
return 2;
case SPEED_5000:
return 3;
case SPEED_2500:
return 4;
case SPEED_1000:
return 5;
case SPEED_100:
return 19;
case SPEED_10:
return 100;
case SPEED_UNKNOWN:
return 100;
default:
if (ecmd.base.speed > SPEED_10000)
return 1;
}
}
/* Old silly heuristics based on name */
if (!strncmp(dev->name, "lec", 3))
return 7;
if (!strncmp(dev->name, "plip", 4))
return 2500;
return 100; /* assume old 10Mbps */
}
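/*
* e.g. a 1 Gb/s port gets path cost 5 and a 100 Mb/s port gets 19; links whose
* speed cannot be determined fall back to 100, the classic 10 Mb/s value.  The
* result is only used while the administrator has not pinned the cost
* (see BR_ADMIN_COST in br_port_carrier_check() below).
*/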
/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
if (!(p->flags & BR_ADMIN_COST) &&
netif_running(dev) && netif_oper_up(dev))
p->path_cost = port_cost(dev);
*notified = false;
if (!netif_running(br->dev))
return;
spin_lock_bh(&br->lock);
if (netif_running(dev) && netif_oper_up(dev)) {
if (p->state == BR_STATE_DISABLED) {
br_stp_enable_port(p);
*notified = true;
}
} else {
if (p->state != BR_STATE_DISABLED) {
br_stp_disable_port(p);
*notified = true;
}
}
spin_unlock_bh(&br->lock);
}
static void br_port_set_promisc(struct net_bridge_port *p)
{
int err = 0;
if (br_promisc_port(p))
return;
err = dev_set_promiscuity(p->dev, 1);
if (err)
return;
br_fdb_unsync_static(p->br, p);
p->flags |= BR_PROMISC;
}
static void br_port_clear_promisc(struct net_bridge_port *p)
{
int err;
/* Check if the port is already non-promisc or if it doesn't
* support UNICAST filtering. Without unicast filtering support
* we'll end up re-enabling promisc mode anyway, so just check for
* it here.
*/
if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
return;
/* Since we'll be clearing the promisc mode, program the port
* first so that we don't have interruption in traffic.
*/
err = br_fdb_sync_static(p->br, p);
if (err)
return;
dev_set_promiscuity(p->dev, -1);
p->flags &= ~BR_PROMISC;
}
/* When a port is added or removed or when certain port flags
* change, this function is called to automatically manage
* promiscuity setting of all the bridge ports. We are always called
* under RTNL so can skip using rcu primitives.
*/
void br_manage_promisc(struct net_bridge *br)
{
struct net_bridge_port *p;
bool set_all = false;
/* If vlan filtering is disabled or bridge interface is placed
* into promiscuous mode, place all ports in promiscuous mode.
*/
if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
set_all = true;
list_for_each_entry(p, &br->port_list, list) {
if (set_all) {
br_port_set_promisc(p);
} else {
/* If the number of auto-ports is <= 1, then all other
* ports will have their output configuration
* statically specified through fdbs. Since ingress
* on the auto-port becomes forwarding/egress to other
* ports and egress configuration is statically known,
* we can say that ingress configuration of the
* auto-port is also statically known.
* This lets us disable promiscuous mode and write
* this config to hw.
*/
if ((p->dev->priv_flags & IFF_UNICAST_FLT) &&
(br->auto_cnt == 0 ||
(br->auto_cnt == 1 && br_auto_port(p))))
br_port_clear_promisc(p);
else
br_port_set_promisc(p);
}
}
}
int nbp_backup_change(struct net_bridge_port *p,
struct net_device *backup_dev)
{
struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
struct net_bridge_port *backup_p = NULL;
ASSERT_RTNL();
if (backup_dev) {
if (!netif_is_bridge_port(backup_dev))
return -ENOENT;
backup_p = br_port_get_rtnl(backup_dev);
if (backup_p->br != p->br)
return -EINVAL;
}
if (p == backup_p)
return -EINVAL;
if (old_backup == backup_p)
return 0;
/* if the backup link is already set, clear it */
if (old_backup)
old_backup->backup_redirected_cnt--;
if (backup_p)
backup_p->backup_redirected_cnt++;
rcu_assign_pointer(p->backup_port, backup_p);
return 0;
}
static void nbp_backup_clear(struct net_bridge_port *p)
{
nbp_backup_change(p, NULL);
if (p->backup_redirected_cnt) {
struct net_bridge_port *cur_p;
list_for_each_entry(cur_p, &p->br->port_list, list) {
struct net_bridge_port *backup_p;
backup_p = rtnl_dereference(cur_p->backup_port);
if (backup_p == p)
nbp_backup_change(cur_p, NULL);
}
}
WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}
static void nbp_update_port_count(struct net_bridge *br)
{
struct net_bridge_port *p;
u32 cnt = 0;
list_for_each_entry(p, &br->port_list, list) {
if (br_auto_port(p))
cnt++;
}
if (br->auto_cnt != cnt) {
br->auto_cnt = cnt;
br_manage_promisc(br);
}
}
static void nbp_delete_promisc(struct net_bridge_port *p)
{
/* If port is currently promiscuous, unset promiscuity.
* Otherwise, it is a static port so remove all addresses
* from it.
*/
dev_set_allmulti(p->dev, -1);
if (br_promisc_port(p))
dev_set_promiscuity(p->dev, -1);
else
br_fdb_unsync_static(p->br, p);
}
static void release_nbp(struct kobject *kobj)
{
struct net_bridge_port *p
= container_of(kobj, struct net_bridge_port, kobj);
kfree(p);
}
static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
struct net_bridge_port *p = kobj_to_brport(kobj);
net_ns_get_ownership(dev_net(p->dev), uid, gid);
}
static const struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
.release = release_nbp,
.get_ownership = brport_get_ownership,
};
static void destroy_nbp(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
p->br = NULL;
p->dev = NULL;
netdev_put(dev, &p->dev_tracker);
kobject_put(&p->kobj);
}
static void destroy_nbp_rcu(struct rcu_head *head)
{
struct net_bridge_port *p =
container_of(head, struct net_bridge_port, rcu);
destroy_nbp(p);
}
static unsigned get_max_headroom(struct net_bridge *br)
{
unsigned max_headroom = 0;
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
if (dev_headroom > max_headroom)
max_headroom = dev_headroom;
}
return max_headroom;
}
static void update_headroom(struct net_bridge *br, int new_hr)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list)
netdev_set_rx_headroom(p->dev, new_hr);
br->dev->needed_headroom = new_hr;
}
/* Deleting a port (interface) from the bridge is done in two steps
* via RCU. The first step marks the device as down; that deletes
* all the timers and stops new packets from flowing through.
*
* Final cleanup doesn't occur until after all CPUs have finished
* processing packets.
*
* Protected from concurrent admin operations by the RTNL mutex
*/
static void del_nbp(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
struct net_device *dev = p->dev;
sysfs_remove_link(br->ifobj, p->dev->name);
nbp_delete_promisc(p);
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
br_mrp_port_del(br, p);
br_cfm_port_del(br, p);
br_ifinfo_notify(RTM_DELLINK, NULL, p);
list_del_rcu(&p->list);
if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
update_headroom(br, get_max_headroom(br));
netdev_reset_rx_headroom(dev);
nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 0, 1);
switchdev_deferred_process();
nbp_backup_clear(p);
nbp_update_port_count(br);
netdev_upper_dev_unlink(dev, br->dev);
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
br_netpoll_disable(p);
call_rcu(&p->rcu, destroy_nbp_rcu);
}
/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
del_nbp(p);
}
br_recalculate_neigh_suppress_enabled(br);
br_fdb_delete_by_port(br, NULL, 0, 1);
cancel_delayed_work_sync(&br->gc_work);
br_sysfs_delbr(br->dev);
unregister_netdevice_queue(br->dev, head);
}
/* find an available port number */
static int find_portno(struct net_bridge *br)
{
int index;
struct net_bridge_port *p;
unsigned long *inuse;
inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!inuse)
return -ENOMEM;
__set_bit(0, inuse); /* zero is reserved */
list_for_each_entry(p, &br->port_list, list)
__set_bit(p->port_no, inuse);
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
bitmap_free(inuse);
return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}
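/*
* e.g. with port numbers 1 and 2 already taken the in-use bitmap is 0b0111
* (bit 0 is reserved), so find_first_zero_bit() hands out 3; once all
* BR_MAX_PORTS numbers are in use the function returns -EXFULL.
*/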
/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
struct net_device *dev)
{
struct net_bridge_port *p;
int index, err;
index = find_portno(br);
if (index < 0)
return ERR_PTR(index);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return ERR_PTR(-ENOMEM);
p->br = br;
netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
br_init_port(p);
br_set_state(p, BR_STATE_DISABLED);
br_stp_port_timer_init(p);
err = br_multicast_add_port(p);
if (err) {
netdev_put(dev, &p->dev_tracker);
kfree(p);
p = ERR_PTR(err);
}
return p;
}
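/* Allocate and register a new bridge device with the given name in @net. */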
int br_add_bridge(struct net *net, const char *name)
{
struct net_device *dev;
int res;
dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
br_dev_setup);
if (!dev)
return -ENOMEM;
dev_net_set(dev, net);
dev->rtnl_link_ops = &br_link_ops;
res = register_netdevice(dev);
if (res)
free_netdev(dev);
return res;
}
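/* Delete the bridge device with the given name; fails if the device does
 * not exist, is not a bridge master, or is still up.
 */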
int br_del_bridge(struct net *net, const char *name)
{
struct net_device *dev;
int ret = 0;
dev = __dev_get_by_name(net, name);
if (dev == NULL)
ret = -ENXIO; /* Could not find device */
else if (!netif_is_bridge_master(dev)) {
/* Attempt to delete non bridge device! */
ret = -EPERM;
}
else if (dev->flags & IFF_UP) {
/* Not shutdown yet. */
ret = -EBUSY;
}
else
br_dev_delete(dev, NULL);
return ret;
}
/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
const struct net_bridge_port *p;
int ret_mtu = 0;
list_for_each_entry(p, &br->port_list, list)
if (!ret_mtu || ret_mtu > p->dev->mtu)
ret_mtu = p->dev->mtu;
return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}
void br_mtu_auto_adjust(struct net_bridge *br)
{
ASSERT_RTNL();
/* if the bridge MTU was manually configured don't mess with it */
if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
return;
/* change to the minimum MTU and clear the flag which was set by
* the bridge ndo_change_mtu callback
*/
dev_set_mtu(br->dev, br_mtu_min(br));
br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}
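/* Clamp the bridge device's TSO limits to the most restrictive values
 * advertised by its ports.
 */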
static void br_set_gso_limits(struct net_bridge *br)
{
unsigned int tso_max_size = TSO_MAX_SIZE;
const struct net_bridge_port *p;
u16 tso_max_segs = TSO_MAX_SEGS;
list_for_each_entry(p, &br->port_list, list) {
tso_max_size = min(tso_max_size, p->dev->tso_max_size);
tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
}
netif_set_tso_max_size(br->dev, tso_max_size);
netif_set_tso_max_segs(br->dev, tso_max_segs);
}
/*
 * Recompute the bridge device's feature set from its slave ports' features.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
netdev_features_t features)
{
struct net_bridge_port *p;
netdev_features_t mask;
if (list_empty(&br->port_list))
return features;
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
list_for_each_entry(p, &br->port_list, list) {
features = netdev_increment_features(features,
p->dev->features, mask);
}
features = netdev_add_tso_features(features, mask);
return features;
}
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net_bridge_port *p;
int err = 0;
unsigned br_hr, dev_hr;
bool changed_addr, fdb_synced = false;
/* Don't allow bridging non-ethernet like devices. */
if ((dev->flags & IFF_LOOPBACK) ||
dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
/* No bridging of bridges */
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
NL_SET_ERR_MSG(extack,
"Can not enslave a bridge to a bridge");
return -ELOOP;
}
/* Device has master upper dev */
if (netdev_master_upper_dev_get(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
if (dev->priv_flags & IFF_DONT_BRIDGE) {
NL_SET_ERR_MSG(extack,
"Device does not allow enslaving to a bridge");
return -EOPNOTSUPP;
}
p = new_nbp(br, dev);
if (IS_ERR(p))
return PTR_ERR(p);
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_allmulti(dev, 1);
if (err) {
br_multicast_del_port(p);
netdev_put(dev, &p->dev_tracker);
kfree(p); /* kobject not yet init'd, manually free */
goto err1;
}
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
goto err2;
err = br_sysfs_addif(p);
if (err)
goto err2;
err = br_netpoll_enable(p);
if (err)
goto err3;
err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
if (err)
goto err4;
dev->priv_flags |= IFF_BRIDGE_PORT;
err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
if (err)
goto err5;
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
nbp_update_port_count(br);
if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
/* When updating the port count we also update all ports'
* promiscuous mode.
* A port leaving promiscuous mode normally gets the bridge's
* fdb synced to the unicast filter (if supported), however,
* `br_port_clear_promisc` does not distinguish between
* non-promiscuous ports and *new* ports, so we need to
* sync explicitly here.
*/
fdb_synced = br_fdb_sync_static(br, p) == 0;
if (!fdb_synced)
netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
}
netdev_update_features(br->dev);
br_hr = br->dev->needed_headroom;
dev_hr = netdev_get_fwd_headroom(dev);
if (br_hr < dev_hr)
update_headroom(br, dev_hr);
else
netdev_set_rx_headroom(dev, br_hr);
if (br_fdb_add_local(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed to insert local address into bridge forwarding table\n");
if (br->dev->addr_assign_type != NET_ADDR_SET) {
/* Ask for permission to use this MAC address now, even if we
* don't end up choosing it below.
*/
err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
if (err)
goto err6;
}
err = nbp_vlan_init(p, extack);
if (err) {
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
goto err6;
}
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
if (netif_running(dev) && netif_oper_up(dev) &&
(br->dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
br_mtu_auto_adjust(br);
br_set_gso_limits(br);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
err6:
if (fdb_synced)
br_fdb_unsync_static(br, p);
list_del_rcu(&p->list);
br_fdb_delete_by_port(br, p, 0, 1);
nbp_update_port_count(br);
netdev_upper_dev_unlink(dev, br->dev);
err5:
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
err4:
br_netpoll_disable(p);
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
br_multicast_del_port(p);
netdev_put(dev, &p->dev_tracker);
kobject_put(&p->kobj);
dev_set_allmulti(dev, -1);
err1:
return err;
}
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
bool changed_addr;
p = br_port_get_rtnl(dev);
if (!p || p->br != br)
return -EINVAL;
	/* Since more than one interface can be attached to a bridge,
	 * there still may be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
del_nbp(p);
br_mtu_auto_adjust(br);
br_set_gso_limits(br);
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
netdev_update_features(br->dev);
return 0;
}
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
struct net_bridge *br = p->br;
if (mask & BR_AUTO_MASK)
nbp_update_port_count(br);
if (mask & (BR_NEIGH_SUPPRESS | BR_NEIGH_VLAN_SUPPRESS))
br_recalculate_neigh_suppress_enabled(br);
}
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
struct net_bridge_port *p;
p = br_port_get_rtnl_rcu(dev);
if (!p)
return false;
return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
| linux-master | net/bridge/br_if.c |
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2020, Nikolay Aleksandrov <[email protected]>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include "br_private.h"
#include "br_private_mcast_eht.h"
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr,
union net_bridge_eht_addr *h_addr);
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr,
union net_bridge_eht_addr *h_addr,
int filter_mode,
bool allow_zero_src);
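/* Look up the EHT host entry for @h_addr in the port group's host rbtree. */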
static struct net_bridge_group_eht_host *
br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr)
{
struct rb_node *node = pg->eht_host_tree.rb_node;
while (node) {
struct net_bridge_group_eht_host *this;
int result;
this = rb_entry(node, struct net_bridge_group_eht_host,
rb_node);
result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
if (result < 0)
node = node->rb_left;
else if (result > 0)
node = node->rb_right;
else
return this;
}
return NULL;
}
static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr)
{
struct net_bridge_group_eht_host *eht_host;
eht_host = br_multicast_eht_host_lookup(pg, h_addr);
if (!eht_host)
return MCAST_INCLUDE;
return eht_host->filter_mode;
}
static struct net_bridge_group_eht_set_entry *
br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
union net_bridge_eht_addr *h_addr)
{
struct rb_node *node = eht_set->entry_tree.rb_node;
while (node) {
struct net_bridge_group_eht_set_entry *this;
int result;
this = rb_entry(node, struct net_bridge_group_eht_set_entry,
rb_node);
result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
if (result < 0)
node = node->rb_left;
else if (result > 0)
node = node->rb_right;
else
return this;
}
return NULL;
}
static struct net_bridge_group_eht_set *
br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr)
{
struct rb_node *node = pg->eht_set_tree.rb_node;
while (node) {
struct net_bridge_group_eht_set *this;
int result;
this = rb_entry(node, struct net_bridge_group_eht_set,
rb_node);
result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
if (result < 0)
node = node->rb_left;
else if (result > 0)
node = node->rb_right;
else
return this;
}
return NULL;
}
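/* Free an EHT host entry that has no remaining set entries: decrement the
 * per-port host count and unlink it from the host tree.
 */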
static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
{
WARN_ON(!hlist_empty(&eht_host->set_entries));
br_multicast_eht_hosts_dec(eht_host->pg);
rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
RB_CLEAR_NODE(&eht_host->rb_node);
kfree(eht_host);
}
static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_group_eht_set_entry *set_h;
set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
timer_shutdown_sync(&set_h->timer);
kfree(set_h);
}
static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_group_eht_set *eht_set;
eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
timer_shutdown_sync(&eht_set->timer);
kfree(eht_set);
}
static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
{
struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
union net_bridge_eht_addr zero_addr;
rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
RB_CLEAR_NODE(&set_h->rb_node);
hlist_del_init(&set_h->host_list);
memset(&zero_addr, 0, sizeof(zero_addr));
if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
eht_host->num_entries--;
hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
queue_work(system_long_wq, &set_h->br->mcast_gc_work);
if (hlist_empty(&eht_host->set_entries))
__eht_destroy_host(eht_host);
}
static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
{
struct net_bridge_group_eht_set_entry *set_h;
struct rb_node *node;
while ((node = rb_first(&eht_set->entry_tree))) {
set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
rb_node);
__eht_del_set_entry(set_h);
}
rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
RB_CLEAR_NODE(&eht_set->rb_node);
hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
}
void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
{
struct net_bridge_group_eht_set *eht_set;
struct rb_node *node;
while ((node = rb_first(&pg->eht_set_tree))) {
eht_set = rb_entry(node, struct net_bridge_group_eht_set,
rb_node);
br_multicast_del_eht_set(eht_set);
}
}
static void br_multicast_eht_set_entry_expired(struct timer_list *t)
{
struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
struct net_bridge *br = set_h->br;
spin_lock(&br->multicast_lock);
if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
goto out;
br_multicast_del_eht_set_entry(set_h->eht_set->pg,
&set_h->eht_set->src_addr,
&set_h->h_addr);
out:
spin_unlock(&br->multicast_lock);
}
static void br_multicast_eht_set_expired(struct timer_list *t)
{
struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
timer);
struct net_bridge *br = eht_set->br;
spin_lock(&br->multicast_lock);
if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
goto out;
br_multicast_del_eht_set(eht_set);
out:
spin_unlock(&br->multicast_lock);
}
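/* Find an existing EHT host entry for @h_addr or create a new one,
 * subject to the per-port EHT hosts limit.
 */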
static struct net_bridge_group_eht_host *
__eht_lookup_create_host(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
unsigned char filter_mode)
{
struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
struct net_bridge_group_eht_host *eht_host;
while (*link) {
struct net_bridge_group_eht_host *this;
int result;
this = rb_entry(*link, struct net_bridge_group_eht_host,
rb_node);
result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
parent = *link;
if (result < 0)
link = &((*link)->rb_left);
else if (result > 0)
link = &((*link)->rb_right);
else
return this;
}
if (br_multicast_eht_hosts_over_limit(pg))
return NULL;
eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
if (!eht_host)
return NULL;
memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
INIT_HLIST_HEAD(&eht_host->set_entries);
eht_host->pg = pg;
eht_host->filter_mode = filter_mode;
rb_link_node(&eht_host->rb_node, parent, link);
rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);
br_multicast_eht_hosts_inc(pg);
return eht_host;
}
static struct net_bridge_group_eht_set_entry *
__eht_lookup_create_set_entry(struct net_bridge *br,
struct net_bridge_group_eht_set *eht_set,
struct net_bridge_group_eht_host *eht_host,
bool allow_zero_src)
{
struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
struct net_bridge_group_eht_set_entry *set_h;
while (*link) {
struct net_bridge_group_eht_set_entry *this;
int result;
this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
rb_node);
result = memcmp(&eht_host->h_addr, &this->h_addr,
sizeof(union net_bridge_eht_addr));
parent = *link;
if (result < 0)
link = &((*link)->rb_left);
else if (result > 0)
link = &((*link)->rb_right);
else
return this;
}
/* always allow auto-created zero entry */
if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
return NULL;
set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
if (!set_h)
return NULL;
memcpy(&set_h->h_addr, &eht_host->h_addr,
sizeof(union net_bridge_eht_addr));
set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
set_h->eht_set = eht_set;
set_h->h_parent = eht_host;
set_h->br = br;
timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);
hlist_add_head(&set_h->host_list, &eht_host->set_entries);
rb_link_node(&set_h->rb_node, parent, link);
rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
/* we must not count the auto-created zero entry otherwise we won't be
* able to track the full list of PG_SRC_ENT_LIMIT entries
*/
if (!allow_zero_src)
eht_host->num_entries++;
return set_h;
}
static struct net_bridge_group_eht_set *
__eht_lookup_create_set(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr)
{
struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
struct net_bridge_group_eht_set *eht_set;
while (*link) {
struct net_bridge_group_eht_set *this;
int result;
this = rb_entry(*link, struct net_bridge_group_eht_set,
rb_node);
result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
parent = *link;
if (result < 0)
link = &((*link)->rb_left);
else if (result > 0)
link = &((*link)->rb_right);
else
return this;
}
eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
if (!eht_set)
return NULL;
memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
eht_set->pg = pg;
eht_set->br = pg->key.port->br;
eht_set->entry_tree = RB_ROOT;
timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);
rb_link_node(&eht_set->rb_node, parent, link);
rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);
return eht_set;
}
static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
union net_bridge_eht_addr *dest)
{
switch (src->proto) {
case htons(ETH_P_IP):
dest->ip4 = src->src.ip4;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
break;
#endif
}
}
static void br_eht_convert_host_filter_mode(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
int filter_mode)
{
struct net_bridge_group_eht_host *eht_host;
union net_bridge_eht_addr zero_addr;
eht_host = br_multicast_eht_host_lookup(pg, h_addr);
if (eht_host)
eht_host->filter_mode = filter_mode;
memset(&zero_addr, 0, sizeof(zero_addr));
switch (filter_mode) {
case MCAST_INCLUDE:
br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
break;
case MCAST_EXCLUDE:
br_multicast_create_eht_set_entry(brmctx, pg, &zero_addr,
h_addr, MCAST_EXCLUDE,
true);
break;
}
}
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr,
union net_bridge_eht_addr *h_addr,
int filter_mode,
bool allow_zero_src)
{
struct net_bridge_group_eht_set_entry *set_h;
struct net_bridge_group_eht_host *eht_host;
struct net_bridge *br = pg->key.port->br;
struct net_bridge_group_eht_set *eht_set;
union net_bridge_eht_addr zero_addr;
memset(&zero_addr, 0, sizeof(zero_addr));
if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
return;
eht_set = __eht_lookup_create_set(pg, src_addr);
if (!eht_set)
return;
eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
if (!eht_host)
goto fail_host;
set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
allow_zero_src);
if (!set_h)
goto fail_set_entry;
mod_timer(&set_h->timer, jiffies + br_multicast_gmi(brmctx));
mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(brmctx));
return;
fail_set_entry:
if (hlist_empty(&eht_host->set_entries))
__eht_destroy_host(eht_host);
fail_host:
if (RB_EMPTY_ROOT(&eht_set->entry_tree))
br_multicast_del_eht_set(eht_set);
}
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *src_addr,
union net_bridge_eht_addr *h_addr)
{
struct net_bridge_group_eht_set_entry *set_h;
struct net_bridge_group_eht_set *eht_set;
bool set_deleted = false;
eht_set = br_multicast_eht_set_lookup(pg, src_addr);
if (!eht_set)
goto out;
set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
if (!set_h)
goto out;
__eht_del_set_entry(set_h);
if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
br_multicast_del_eht_set(eht_set);
set_deleted = true;
}
out:
return set_deleted;
}
static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr)
{
struct net_bridge_group_eht_set_entry *set_h;
struct net_bridge_group_eht_host *eht_host;
struct hlist_node *tmp;
eht_host = br_multicast_eht_host_lookup(pg, h_addr);
if (!eht_host)
return;
hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
br_multicast_del_eht_set_entry(set_h->eht_set->pg,
&set_h->eht_set->src_addr,
&set_h->h_addr);
}
/* create new set entries from reports */
static void __eht_create_set_entries(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size,
int filter_mode)
{
union net_bridge_eht_addr eht_src_addr;
u32 src_idx;
memset(&eht_src_addr, 0, sizeof(eht_src_addr));
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
br_multicast_create_eht_set_entry(brmctx, pg, &eht_src_addr,
h_addr, filter_mode,
false);
}
}
/* delete existing set entries and their (S,G) entries if they were the last */
static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size)
{
union net_bridge_eht_addr eht_src_addr;
struct net_bridge_group_src *src_ent;
bool changed = false;
struct br_ip src_ip;
u32 src_idx;
memset(&eht_src_addr, 0, sizeof(eht_src_addr));
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
continue;
memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
src_ent = br_multicast_find_group_src(pg, &src_ip);
if (!src_ent)
continue;
br_multicast_del_group_src(src_ent, true);
changed = true;
}
return changed;
}
static bool br_multicast_eht_allow(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size)
{
bool changed = false;
switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
case MCAST_INCLUDE:
__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs,
addr_size, MCAST_INCLUDE);
break;
case MCAST_EXCLUDE:
changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
addr_size);
break;
}
return changed;
}
static bool br_multicast_eht_block(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size)
{
bool changed = false;
switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
case MCAST_INCLUDE:
changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
addr_size);
break;
case MCAST_EXCLUDE:
__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
MCAST_EXCLUDE);
break;
}
return changed;
}
/* flush_entries is true when changing mode */
static bool __eht_inc_exc(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size,
unsigned char filter_mode,
bool to_report)
{
bool changed = false, flush_entries = to_report;
union net_bridge_eht_addr eht_src_addr;
if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
flush_entries = true;
memset(&eht_src_addr, 0, sizeof(eht_src_addr));
/* if we're changing mode del host and its entries */
if (flush_entries)
br_multicast_del_eht_host(pg, h_addr);
__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
filter_mode);
/* we can be missing sets only if we've deleted some entries */
if (flush_entries) {
struct net_bridge_group_eht_set *eht_set;
struct net_bridge_group_src *src_ent;
struct hlist_node *tmp;
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
br_multicast_ip_src_to_eht_addr(&src_ent->addr,
&eht_src_addr);
if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
br_multicast_del_group_src(src_ent, true);
changed = true;
continue;
}
			/* This is an optimization for TO_INCLUDE where we lower
			 * the set's timeout to LMQT to catch timing out hosts:
			 * - host A (timing out): set entries X, Y
			 * - host B: set entry Z (new from current TO_INCLUDE)
			 *           sends BLOCK Z after LMQT but host A's EHT
			 *           entries still exist (unless lowered to LMQT
			 *           so they can time out with the S,Gs)
			 * => otherwise we'd wait another LMQT, when we could
			 *    just delete the group immediately
			 */
if (!(src_ent->flags & BR_SGRP_F_SEND) ||
filter_mode != MCAST_INCLUDE ||
!to_report)
continue;
eht_set = br_multicast_eht_set_lookup(pg,
&eht_src_addr);
if (!eht_set)
continue;
mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(brmctx));
}
}
return changed;
}
static bool br_multicast_eht_inc(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size,
bool to_report)
{
bool changed;
changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
MCAST_INCLUDE, to_report);
br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_INCLUDE);
return changed;
}
static bool br_multicast_eht_exc(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size,
bool to_report)
{
bool changed;
changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
MCAST_EXCLUDE, to_report);
br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_EXCLUDE);
return changed;
}
static bool __eht_ip4_handle(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
int grec_type)
{
bool changed = false, to_report = false;
switch (grec_type) {
case IGMPV3_ALLOW_NEW_SOURCES:
br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(__be32));
break;
case IGMPV3_BLOCK_OLD_SOURCES:
changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(__be32));
break;
case IGMPV3_CHANGE_TO_INCLUDE:
to_report = true;
fallthrough;
case IGMPV3_MODE_IS_INCLUDE:
changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(__be32), to_report);
break;
case IGMPV3_CHANGE_TO_EXCLUDE:
to_report = true;
fallthrough;
case IGMPV3_MODE_IS_EXCLUDE:
changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(__be32), to_report);
break;
}
return changed;
}
#if IS_ENABLED(CONFIG_IPV6)
static bool __eht_ip6_handle(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
union net_bridge_eht_addr *h_addr,
void *srcs,
u32 nsrcs,
int grec_type)
{
bool changed = false, to_report = false;
switch (grec_type) {
case MLD2_ALLOW_NEW_SOURCES:
br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(struct in6_addr));
break;
case MLD2_BLOCK_OLD_SOURCES:
changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(struct in6_addr));
break;
case MLD2_CHANGE_TO_INCLUDE:
to_report = true;
fallthrough;
case MLD2_MODE_IS_INCLUDE:
changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(struct in6_addr),
to_report);
break;
case MLD2_CHANGE_TO_EXCLUDE:
to_report = true;
fallthrough;
case MLD2_MODE_IS_EXCLUDE:
changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
sizeof(struct in6_addr),
to_report);
break;
}
return changed;
}
#endif
/* true means an entry was deleted */
bool br_multicast_eht_handle(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg,
void *h_addr,
void *srcs,
u32 nsrcs,
size_t addr_size,
int grec_type)
{
bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
union net_bridge_eht_addr eht_host_addr;
bool changed = false;
if (!eht_enabled)
goto out;
memset(&eht_host_addr, 0, sizeof(eht_host_addr));
memcpy(&eht_host_addr, h_addr, addr_size);
if (addr_size == sizeof(__be32))
changed = __eht_ip4_handle(brmctx, pg, &eht_host_addr, srcs,
nsrcs, grec_type);
#if IS_ENABLED(CONFIG_IPV6)
else
changed = __eht_ip6_handle(brmctx, pg, &eht_host_addr, srcs,
nsrcs, grec_type);
#endif
out:
return changed;
}
int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
u32 eht_hosts_limit)
{
struct net_bridge *br = p->br;
if (!eht_hosts_limit)
return -EINVAL;
spin_lock_bh(&br->multicast_lock);
p->multicast_eht_hosts_limit = eht_hosts_limit;
spin_unlock_bh(&br->multicast_lock);
return 0;
}
| linux-master | net/bridge/br_multicast_eht.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/cfm_bridge.h>
#include <uapi/linux/cfm_bridge.h>
#include "br_private_cfm.h"
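/* Find a CFM MEP on the bridge by its instance number. */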
static struct br_cfm_mep *br_mep_find(struct net_bridge *br, u32 instance)
{
struct br_cfm_mep *mep;
hlist_for_each_entry(mep, &br->mep_list, head)
if (mep->instance == instance)
return mep;
return NULL;
}
static struct br_cfm_mep *br_mep_find_ifindex(struct net_bridge *br,
u32 ifindex)
{
struct br_cfm_mep *mep;
hlist_for_each_entry_rcu(mep, &br->mep_list, head,
lockdep_rtnl_is_held())
if (mep->create.ifindex == ifindex)
return mep;
return NULL;
}
static struct br_cfm_peer_mep *br_peer_mep_find(struct br_cfm_mep *mep,
u32 mepid)
{
struct br_cfm_peer_mep *peer_mep;
hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head,
lockdep_rtnl_is_held())
if (peer_mep->mepid == mepid)
return peer_mep;
return NULL;
}
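/* Find the bridge port whose underlying device has the given ifindex. */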
static struct net_bridge_port *br_mep_get_port(struct net_bridge *br,
u32 ifindex)
{
struct net_bridge_port *port;
list_for_each_entry(port, &br->port_list, list)
if (port->dev->ifindex == ifindex)
return port;
return NULL;
}
/* Calculate the CCM interval in us. */
static u32 interval_to_us(enum br_cfm_ccm_interval interval)
{
switch (interval) {
case BR_CFM_CCM_INTERVAL_NONE:
return 0;
case BR_CFM_CCM_INTERVAL_3_3_MS:
return 3300;
case BR_CFM_CCM_INTERVAL_10_MS:
return 10 * 1000;
case BR_CFM_CCM_INTERVAL_100_MS:
return 100 * 1000;
case BR_CFM_CCM_INTERVAL_1_SEC:
return 1000 * 1000;
case BR_CFM_CCM_INTERVAL_10_SEC:
return 10 * 1000 * 1000;
case BR_CFM_CCM_INTERVAL_1_MIN:
return 60 * 1000 * 1000;
case BR_CFM_CCM_INTERVAL_10_MIN:
return 10 * 60 * 1000 * 1000;
}
return 0;
}
/* Convert the interface interval to CCM PDU value. */
static u32 interval_to_pdu(enum br_cfm_ccm_interval interval)
{
switch (interval) {
case BR_CFM_CCM_INTERVAL_NONE:
return 0;
case BR_CFM_CCM_INTERVAL_3_3_MS:
return 1;
case BR_CFM_CCM_INTERVAL_10_MS:
return 2;
case BR_CFM_CCM_INTERVAL_100_MS:
return 3;
case BR_CFM_CCM_INTERVAL_1_SEC:
return 4;
case BR_CFM_CCM_INTERVAL_10_SEC:
return 5;
case BR_CFM_CCM_INTERVAL_1_MIN:
return 6;
case BR_CFM_CCM_INTERVAL_10_MIN:
return 7;
}
return 0;
}
/* Convert the CCM PDU value to interval on interface. */
static u32 pdu_to_interval(u32 value)
{
switch (value) {
case 0:
return BR_CFM_CCM_INTERVAL_NONE;
case 1:
return BR_CFM_CCM_INTERVAL_3_3_MS;
case 2:
return BR_CFM_CCM_INTERVAL_10_MS;
case 3:
return BR_CFM_CCM_INTERVAL_100_MS;
case 4:
return BR_CFM_CCM_INTERVAL_1_SEC;
case 5:
return BR_CFM_CCM_INTERVAL_10_SEC;
case 6:
return BR_CFM_CCM_INTERVAL_1_MIN;
case 7:
return BR_CFM_CCM_INTERVAL_10_MIN;
}
return BR_CFM_CCM_INTERVAL_NONE;
}
static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
{
u32 interval_us;
interval_us = interval_to_us(peer_mep->mep->cc_config.exp_interval);
	/* The ccm_rx_dwork function must run every 1/4
	 * of the configured CC 'expected_interval'
	 * in order to detect a CCM defect after 3.25 intervals.
	 */
queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
usecs_to_jiffies(interval_us / 4));
}
static void br_cfm_notify(int event, const struct net_bridge_port *port)
{
u32 filter = RTEXT_FILTER_CFM_STATUS;
br_info_notify(event, port->br, NULL, filter);
}
static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
{
memset(&peer_mep->cc_status, 0, sizeof(peer_mep->cc_status));
peer_mep->ccm_rx_count_miss = 0;
ccm_rx_timer_start(peer_mep);
}
static void cc_peer_disable(struct br_cfm_peer_mep *peer_mep)
{
cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
}
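/* Build a CCM PDU for transmission on the MEP's bridge port: Ethernet
 * header, common CFM header, sequence number, MEP ID, MAID, ITU reserved
 * area, optional port/interface status TLVs and the End TLV.
 */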
static struct sk_buff *ccm_frame_build(struct br_cfm_mep *mep,
const struct br_cfm_cc_ccm_tx_info *const tx_info)
{
struct br_cfm_common_hdr *common_hdr;
struct net_bridge_port *b_port;
struct br_cfm_maid *maid;
u8 *itu_reserved, *e_tlv;
struct ethhdr *eth_hdr;
struct sk_buff *skb;
__be32 *status_tlv;
__be32 *snumber;
__be16 *mepid;
skb = dev_alloc_skb(CFM_CCM_MAX_FRAME_LENGTH);
if (!skb)
return NULL;
rcu_read_lock();
b_port = rcu_dereference(mep->b_port);
if (!b_port) {
kfree_skb(skb);
rcu_read_unlock();
return NULL;
}
skb->dev = b_port->dev;
rcu_read_unlock();
	/* The device cannot be deleted until the work queue functions have
	 * completed. This function is called from ccm_tx_work_expired(),
	 * which is a work queue function.
	 */
skb->protocol = htons(ETH_P_CFM);
skb->priority = CFM_FRAME_PRIO;
/* Ethernet header */
eth_hdr = skb_put(skb, sizeof(*eth_hdr));
ether_addr_copy(eth_hdr->h_dest, tx_info->dmac.addr);
ether_addr_copy(eth_hdr->h_source, mep->config.unicast_mac.addr);
eth_hdr->h_proto = htons(ETH_P_CFM);
/* Common CFM Header */
common_hdr = skb_put(skb, sizeof(*common_hdr));
common_hdr->mdlevel_version = mep->config.mdlevel << 5;
common_hdr->opcode = BR_CFM_OPCODE_CCM;
common_hdr->flags = (mep->rdi << 7) |
interval_to_pdu(mep->cc_config.exp_interval);
common_hdr->tlv_offset = CFM_CCM_TLV_OFFSET;
/* Sequence number */
snumber = skb_put(skb, sizeof(*snumber));
if (tx_info->seq_no_update) {
*snumber = cpu_to_be32(mep->ccm_tx_snumber);
mep->ccm_tx_snumber += 1;
} else {
*snumber = 0;
}
mepid = skb_put(skb, sizeof(*mepid));
*mepid = cpu_to_be16((u16)mep->config.mepid);
maid = skb_put(skb, sizeof(*maid));
memcpy(maid->data, mep->cc_config.exp_maid.data, sizeof(maid->data));
/* ITU reserved (CFM_CCM_ITU_RESERVED_SIZE octets) */
itu_reserved = skb_put(skb, CFM_CCM_ITU_RESERVED_SIZE);
memset(itu_reserved, 0, CFM_CCM_ITU_RESERVED_SIZE);
	/* General CFM TLV format:
* TLV type: one byte
* TLV value length: two bytes
* TLV value: 'TLV value length' bytes
*/
/* Port status TLV. The value length is 1. Total of 4 bytes. */
if (tx_info->port_tlv) {
status_tlv = skb_put(skb, sizeof(*status_tlv));
*status_tlv = cpu_to_be32((CFM_PORT_STATUS_TLV_TYPE << 24) |
(1 << 8) | /* Value length */
(tx_info->port_tlv_value & 0xFF));
}
/* Interface status TLV. The value length is 1. Total of 4 bytes. */
if (tx_info->if_tlv) {
status_tlv = skb_put(skb, sizeof(*status_tlv));
*status_tlv = cpu_to_be32((CFM_IF_STATUS_TLV_TYPE << 24) |
(1 << 8) | /* Value length */
(tx_info->if_tlv_value & 0xFF));
}
/* End TLV */
e_tlv = skb_put(skb, sizeof(*e_tlv));
*e_tlv = CFM_ENDE_TLV_TYPE;
return skb;
}
static void ccm_frame_tx(struct sk_buff *skb)
{
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
/* This function is called every configured CC 'expected_interval'
 * in order to drive CCM transmission when enabled.
 */
static void ccm_tx_work_expired(struct work_struct *work)
{
struct delayed_work *del_work;
struct br_cfm_mep *mep;
struct sk_buff *skb;
u32 interval_us;
del_work = to_delayed_work(work);
mep = container_of(del_work, struct br_cfm_mep, ccm_tx_dwork);
if (time_before_eq(mep->ccm_tx_end, jiffies)) {
/* Transmission period has ended */
mep->cc_ccm_tx_info.period = 0;
return;
}
skb = ccm_frame_build(mep, &mep->cc_ccm_tx_info);
if (skb)
ccm_frame_tx(skb);
interval_us = interval_to_us(mep->cc_config.exp_interval);
queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
usecs_to_jiffies(interval_us));
}
/* This function is called every 1/4 of the configured CC 'expected_interval'
 * in order to detect a CCM defect after 3.25 intervals.
 */
static void ccm_rx_work_expired(struct work_struct *work)
{
struct br_cfm_peer_mep *peer_mep;
struct net_bridge_port *b_port;
struct delayed_work *del_work;
del_work = to_delayed_work(work);
peer_mep = container_of(del_work, struct br_cfm_peer_mep, ccm_rx_dwork);
	/* After 13 counts (4 * 3.25), 3.25 intervals have expired */
if (peer_mep->ccm_rx_count_miss < 13) {
		/* 3.25 intervals have NOT expired without CCM reception */
peer_mep->ccm_rx_count_miss++;
/* Start timer again */
ccm_rx_timer_start(peer_mep);
} else {
		/* 3.25 intervals have expired without CCM reception.
		 * CCM defect detected
		 */
peer_mep->cc_status.ccm_defect = true;
/* Change in CCM defect status - notify */
rcu_read_lock();
b_port = rcu_dereference(peer_mep->mep->b_port);
if (b_port)
br_cfm_notify(RTM_NEWLINK, b_port);
rcu_read_unlock();
}
}
static u32 ccm_tlv_extract(struct sk_buff *skb, u32 index,
struct br_cfm_peer_mep *peer_mep)
{
__be32 *s_tlv;
__be32 _s_tlv;
u32 h_s_tlv;
u8 *e_tlv;
u8 _e_tlv;
e_tlv = skb_header_pointer(skb, index, sizeof(_e_tlv), &_e_tlv);
if (!e_tlv)
return 0;
/* TLV is present - get the status TLV */
s_tlv = skb_header_pointer(skb,
index,
sizeof(_s_tlv), &_s_tlv);
if (!s_tlv)
return 0;
h_s_tlv = ntohl(*s_tlv);
if ((h_s_tlv >> 24) == CFM_IF_STATUS_TLV_TYPE) {
/* Interface status TLV */
peer_mep->cc_status.tlv_seen = true;
peer_mep->cc_status.if_tlv_value = (h_s_tlv & 0xFF);
}
if ((h_s_tlv >> 24) == CFM_PORT_STATUS_TLV_TYPE) {
/* Port status TLV */
peer_mep->cc_status.tlv_seen = true;
peer_mep->cc_status.port_tlv_value = (h_s_tlv & 0xFF);
}
/* The Sender ID TLV is not handled */
/* The Organization-Specific TLV is not handled */
	/* Return the length of this TLV: the length of the value field
	 * plus 3 bytes for the type and length fields.
	 */
return ((h_s_tlv >> 8) & 0xFFFF) + 3;
}
/* note: already called with rcu_read_lock */
static int br_cfm_frame_rx(struct net_bridge_port *port, struct sk_buff *skb)
{
u32 mdlevel, interval, size, index, max;
const struct br_cfm_common_hdr *hdr;
struct br_cfm_peer_mep *peer_mep;
const struct br_cfm_maid *maid;
struct br_cfm_common_hdr _hdr;
struct br_cfm_maid _maid;
struct br_cfm_mep *mep;
struct net_bridge *br;
__be32 *snumber;
__be32 _snumber;
__be16 *mepid;
__be16 _mepid;
if (port->state == BR_STATE_DISABLED)
return 0;
hdr = skb_header_pointer(skb, 0, sizeof(_hdr), &_hdr);
if (!hdr)
return 1;
br = port->br;
mep = br_mep_find_ifindex(br, port->dev->ifindex);
if (unlikely(!mep))
/* No MEP on this port - must be forwarded */
return 0;
mdlevel = hdr->mdlevel_version >> 5;
if (mdlevel > mep->config.mdlevel)
/* The level is above this MEP level - must be forwarded */
return 0;
if ((hdr->mdlevel_version & 0x1F) != 0) {
/* Invalid version */
mep->status.version_unexp_seen = true;
return 1;
}
if (mdlevel < mep->config.mdlevel) {
/* The level is below this MEP level */
mep->status.rx_level_low_seen = true;
return 1;
}
if (hdr->opcode == BR_CFM_OPCODE_CCM) {
/* CCM PDU received. */
/* MA ID is after common header + sequence number + MEP ID */
maid = skb_header_pointer(skb,
CFM_CCM_PDU_MAID_OFFSET,
sizeof(_maid), &_maid);
if (!maid)
return 1;
if (memcmp(maid->data, mep->cc_config.exp_maid.data,
sizeof(maid->data)))
/* MA ID not as expected */
return 1;
/* MEP ID is after common header + sequence number */
mepid = skb_header_pointer(skb,
CFM_CCM_PDU_MEPID_OFFSET,
sizeof(_mepid), &_mepid);
if (!mepid)
return 1;
peer_mep = br_peer_mep_find(mep, (u32)ntohs(*mepid));
if (!peer_mep)
return 1;
/* Interval is in common header flags */
interval = hdr->flags & 0x07;
if (mep->cc_config.exp_interval != pdu_to_interval(interval))
/* Interval not as expected */
return 1;
/* A valid CCM frame is received */
if (peer_mep->cc_status.ccm_defect) {
peer_mep->cc_status.ccm_defect = false;
/* Change in CCM defect status - notify */
br_cfm_notify(RTM_NEWLINK, port);
/* Start CCM RX timer */
ccm_rx_timer_start(peer_mep);
}
peer_mep->cc_status.seen = true;
peer_mep->ccm_rx_count_miss = 0;
/* RDI is in common header flags */
peer_mep->cc_status.rdi = (hdr->flags & 0x80) ? true : false;
/* Sequence number is after common header */
snumber = skb_header_pointer(skb,
CFM_CCM_PDU_SEQNR_OFFSET,
sizeof(_snumber), &_snumber);
if (!snumber)
return 1;
if (ntohl(*snumber) != (mep->ccm_rx_snumber + 1))
/* Unexpected sequence number */
peer_mep->cc_status.seq_unexp_seen = true;
mep->ccm_rx_snumber = ntohl(*snumber);
/* TLV end is after common header + sequence number + MEP ID +
* MA ID + ITU reserved
*/
index = CFM_CCM_PDU_TLV_OFFSET;
max = 0;
do { /* Handle all TLVs */
size = ccm_tlv_extract(skb, index, peer_mep);
index += size;
max += 1;
} while (size != 0 && max < 4); /* Max four TLVs possible */
return 1;
}
mep->status.opcode_unexp_seen = true;
return 1;
}
static struct br_frame_type cfm_frame_type __read_mostly = {
.type = cpu_to_be16(ETH_P_CFM),
.frame_handler = br_cfm_frame_rx,
};
int br_cfm_mep_create(struct net_bridge *br,
const u32 instance,
struct br_cfm_mep_create *const create,
struct netlink_ext_ack *extack)
{
struct net_bridge_port *p;
struct br_cfm_mep *mep;
ASSERT_RTNL();
if (create->domain == BR_CFM_VLAN) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN domain not supported");
return -EINVAL;
}
if (create->domain != BR_CFM_PORT) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid domain value");
return -EINVAL;
}
if (create->direction == BR_CFM_MEP_DIRECTION_UP) {
NL_SET_ERR_MSG_MOD(extack,
"Up-MEP not supported");
return -EINVAL;
}
if (create->direction != BR_CFM_MEP_DIRECTION_DOWN) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid direction value");
return -EINVAL;
}
p = br_mep_get_port(br, create->ifindex);
if (!p) {
NL_SET_ERR_MSG_MOD(extack,
"Port is not related to bridge");
return -EINVAL;
}
mep = br_mep_find(br, instance);
if (mep) {
NL_SET_ERR_MSG_MOD(extack,
"MEP instance already exists");
return -EEXIST;
}
/* In PORT domain only one instance can be created per port */
if (create->domain == BR_CFM_PORT) {
mep = br_mep_find_ifindex(br, create->ifindex);
if (mep) {
NL_SET_ERR_MSG_MOD(extack,
"Only one Port MEP on a port allowed");
return -EINVAL;
}
}
mep = kzalloc(sizeof(*mep), GFP_KERNEL);
if (!mep)
return -ENOMEM;
mep->create = *create;
mep->instance = instance;
rcu_assign_pointer(mep->b_port, p);
INIT_HLIST_HEAD(&mep->peer_mep_list);
INIT_DELAYED_WORK(&mep->ccm_tx_dwork, ccm_tx_work_expired);
if (hlist_empty(&br->mep_list))
br_add_frame(br, &cfm_frame_type);
hlist_add_tail_rcu(&mep->head, &br->mep_list);
return 0;
}
static void mep_delete_implementation(struct net_bridge *br,
struct br_cfm_mep *mep)
{
struct br_cfm_peer_mep *peer_mep;
struct hlist_node *n_store;
ASSERT_RTNL();
/* Empty and free peer MEP list */
hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) {
cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
hlist_del_rcu(&peer_mep->head);
kfree_rcu(peer_mep, rcu);
}
cancel_delayed_work_sync(&mep->ccm_tx_dwork);
RCU_INIT_POINTER(mep->b_port, NULL);
hlist_del_rcu(&mep->head);
kfree_rcu(mep, rcu);
if (hlist_empty(&br->mep_list))
br_del_frame(br, &cfm_frame_type);
}
int br_cfm_mep_delete(struct net_bridge *br,
const u32 instance,
struct netlink_ext_ack *extack)
{
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
mep_delete_implementation(br, mep);
return 0;
}
int br_cfm_mep_config_set(struct net_bridge *br,
const u32 instance,
const struct br_cfm_mep_config *const config,
struct netlink_ext_ack *extack)
{
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
mep->config = *config;
return 0;
}
int br_cfm_cc_config_set(struct net_bridge *br,
const u32 instance,
const struct br_cfm_cc_config *const config,
struct netlink_ext_ack *extack)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
/* Check for no change in configuration */
if (memcmp(config, &mep->cc_config, sizeof(*config)) == 0)
return 0;
if (config->enable && !mep->cc_config.enable)
/* CC is enabled */
hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
cc_peer_enable(peer_mep);
if (!config->enable && mep->cc_config.enable)
/* CC is disabled */
hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
cc_peer_disable(peer_mep);
mep->cc_config = *config;
mep->ccm_rx_snumber = 0;
mep->ccm_tx_snumber = 1;
return 0;
}
int br_cfm_cc_peer_mep_add(struct net_bridge *br, const u32 instance,
u32 mepid,
struct netlink_ext_ack *extack)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
peer_mep = br_peer_mep_find(mep, mepid);
if (peer_mep) {
NL_SET_ERR_MSG_MOD(extack,
"Peer MEP-ID already exists");
return -EEXIST;
}
peer_mep = kzalloc(sizeof(*peer_mep), GFP_KERNEL);
if (!peer_mep)
return -ENOMEM;
peer_mep->mepid = mepid;
peer_mep->mep = mep;
INIT_DELAYED_WORK(&peer_mep->ccm_rx_dwork, ccm_rx_work_expired);
if (mep->cc_config.enable)
cc_peer_enable(peer_mep);
hlist_add_tail_rcu(&peer_mep->head, &mep->peer_mep_list);
return 0;
}
int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
u32 mepid,
struct netlink_ext_ack *extack)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
peer_mep = br_peer_mep_find(mep, mepid);
if (!peer_mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Peer MEP-ID does not exist");
return -ENOENT;
}
cc_peer_disable(peer_mep);
hlist_del_rcu(&peer_mep->head);
kfree_rcu(peer_mep, rcu);
return 0;
}
int br_cfm_cc_rdi_set(struct net_bridge *br, const u32 instance,
const bool rdi, struct netlink_ext_ack *extack)
{
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
mep->rdi = rdi;
return 0;
}
int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
const struct br_cfm_cc_ccm_tx_info *const tx_info,
struct netlink_ext_ack *extack)
{
struct br_cfm_mep *mep;
ASSERT_RTNL();
mep = br_mep_find(br, instance);
if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
return -ENOENT;
}
if (memcmp(tx_info, &mep->cc_ccm_tx_info, sizeof(*tx_info)) == 0) {
/* No change in tx_info. */
if (mep->cc_ccm_tx_info.period == 0)
/* Transmission is not enabled - just return */
return 0;
/* Transmission is ongoing, the end time is recalculated */
mep->ccm_tx_end = jiffies +
usecs_to_jiffies(tx_info->period * 1000000);
return 0;
}
if (tx_info->period == 0 && mep->cc_ccm_tx_info.period == 0)
/* Some change in info and transmission is not ongoing */
goto save;
if (tx_info->period != 0 && mep->cc_ccm_tx_info.period != 0) {
/* Some change in info and transmission is ongoing
* The end time is recalculated
*/
mep->ccm_tx_end = jiffies +
usecs_to_jiffies(tx_info->period * 1000000);
goto save;
}
if (tx_info->period == 0 && mep->cc_ccm_tx_info.period != 0) {
cancel_delayed_work_sync(&mep->ccm_tx_dwork);
goto save;
}
/* Start delayed work to transmit CCM frames. It is done with zero delay
* to send first frame immediately
*/
mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);
save:
mep->cc_ccm_tx_info = *tx_info;
return 0;
}
int br_cfm_mep_count(struct net_bridge *br, u32 *count)
{
struct br_cfm_mep *mep;
*count = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(mep, &br->mep_list, head)
*count += 1;
rcu_read_unlock();
return 0;
}
int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
*count = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(mep, &br->mep_list, head)
hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head)
*count += 1;
rcu_read_unlock();
return 0;
}
bool br_cfm_created(struct net_bridge *br)
{
return !hlist_empty(&br->mep_list);
}
/* Deletes the CFM instances on a specific bridge port
*/
void br_cfm_port_del(struct net_bridge *br, struct net_bridge_port *port)
{
struct hlist_node *n_store;
struct br_cfm_mep *mep;
ASSERT_RTNL();
hlist_for_each_entry_safe(mep, n_store, &br->mep_list, head)
if (mep->create.ifindex == port->dev->ifindex)
mep_delete_implementation(br, mep);
}
| linux-master | net/bridge/br_cfm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bridge netlink control interface
*
* Authors:
* Stephen Hemminger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_cfm.h"
#include "br_private_tunnel.h"
#include "br_private_mcast_eht.h"
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
u32 filter_mask)
{
struct net_bridge_vlan *v;
u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
u16 flags, pvid;
int num_vlans = 0;
if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
return 0;
pvid = br_get_pvid(vg);
/* Count number of vlan infos */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
flags = 0;
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v))
continue;
if (v->vid == pvid)
flags |= BRIDGE_VLAN_INFO_PVID;
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_range_start == 0) {
goto initvars;
} else if ((v->vid - vid_range_end) == 1 &&
flags == vid_range_flags) {
vid_range_end = v->vid;
continue;
} else {
if ((vid_range_end - vid_range_start) > 0)
num_vlans += 2;
else
num_vlans += 1;
}
initvars:
vid_range_start = v->vid;
vid_range_end = v->vid;
vid_range_flags = flags;
}
if (vid_range_start != 0) {
if ((vid_range_end - vid_range_start) > 0)
num_vlans += 2;
else
num_vlans += 1;
}
return num_vlans;
}
static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
u32 filter_mask)
{
int num_vlans;
if (!vg)
return 0;
if (filter_mask & RTEXT_FILTER_BRVLAN)
return vg->num_vlans;
rcu_read_lock();
num_vlans = __get_num_vlan_infos(vg, filter_mask);
rcu_read_unlock();
return num_vlans;
}
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
u32 filter_mask)
{
struct net_bridge_vlan_group *vg = NULL;
struct net_bridge_port *p = NULL;
struct net_bridge *br = NULL;
u32 num_cfm_peer_mep_infos;
u32 num_cfm_mep_infos;
size_t vinfo_sz = 0;
int num_vlan_infos;
rcu_read_lock();
if (netif_is_bridge_port(dev)) {
p = br_port_get_check_rcu(dev);
if (p)
vg = nbp_vlan_group_rcu(p);
} else if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group_rcu(br);
}
num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
rcu_read_unlock();
if (p && (p->flags & BR_VLAN_TUNNEL))
vinfo_sz += br_get_vlan_tunnel_info_size(vg);
/* Each VLAN is returned in bridge_vlan_info along with flags */
vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
if (p && vg && (filter_mask & RTEXT_FILTER_MST))
vinfo_sz += br_mst_info_size(vg);
if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
return vinfo_sz;
if (!br)
return vinfo_sz;
/* CFM status info must be added */
br_cfm_mep_count(br, &num_cfm_mep_infos);
br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);
vinfo_sz += nla_total_size(0); /* IFLA_BRIDGE_CFM */
/* For each status struct the MEP instance (u32) is added */
/* MEP instance (u32) + br_cfm_mep_status */
vinfo_sz += num_cfm_mep_infos *
/*IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
(nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
+ nla_total_size(sizeof(u32)));
/* MEP instance (u32) + br_cfm_cc_peer_status */
vinfo_sz += num_cfm_peer_mep_infos *
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
(nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
+ nla_total_size(sizeof(u8))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
+ nla_total_size(sizeof(u8))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
+ nla_total_size(sizeof(u32))
/* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
+ nla_total_size(sizeof(u32)));
return vinfo_sz;
}
static inline size_t br_port_info_size(void)
{
return nla_total_size(1) /* IFLA_BRPORT_STATE */
+ nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
+ nla_total_size(4) /* IFLA_BRPORT_COST */
+ nla_total_size(1) /* IFLA_BRPORT_MODE */
+ nla_total_size(1) /* IFLA_BRPORT_GUARD */
+ nla_total_size(1) /* IFLA_BRPORT_PROTECT */
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_MCAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_BCAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
+ nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */
+ nla_total_size(1) /* IFLA_BRPORT_ISOLATED */
+ nla_total_size(1) /* IFLA_BRPORT_LOCKED */
+ nla_total_size(1) /* IFLA_BRPORT_MAB */
+ nla_total_size(1) /* IFLA_BRPORT_NEIGH_VLAN_SUPPRESS */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
+ nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_N_GROUPS */
+ nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_MAX_GROUPS */
#endif
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_RING_OPEN */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_IN_OPEN */
+ nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
+ nla_total_size(sizeof(u32)) /* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
+ nla_total_size(sizeof(u32)) /* IFLA_BRPORT_BACKUP_NHID */
+ 0;
}
static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
+ nla_total_size(br_get_link_af_size_filtered(dev,
filter_mask)) /* IFLA_AF_SPEC */
+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}
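/* Fill the IFLA_BRPORT_* attributes describing bridge port @p into @skb. */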
static int br_port_fill_attrs(struct sk_buff *skb,
const struct net_bridge_port *p)
{
u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
struct net_bridge_port *backup_p;
u64 timerval;
if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROTECT,
!!(p->flags & BR_ROOT_BLOCK)) ||
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
!!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
!!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
!!(p->flags & BR_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
!!(p->flags & BR_MCAST_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
!!(p->flags & BR_BCAST_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
!!(p->flags & BR_PROXYARP_WIFI)) ||
nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
&p->designated_root) ||
nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
&p->designated_bridge) ||
nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
p->topology_change_ack) ||
nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
BR_VLAN_TUNNEL)) ||
nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
!!(p->flags & BR_NEIGH_SUPPRESS)) ||
nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
BR_MRP_LOST_CONT)) ||
nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
!!(p->flags & BR_MRP_LOST_IN_CONT)) ||
nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) ||
nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
!!(p->flags & BR_NEIGH_VLAN_SUPPRESS)))
return -EMSGSIZE;
timerval = br_timer_value(&p->message_age_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->forward_delay_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->hold_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
p->multicast_ctx.multicast_router) ||
nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
p->multicast_eht_hosts_limit) ||
nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
p->multicast_eht_hosts_cnt) ||
nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
br_multicast_ngroups_get(&p->multicast_ctx)) ||
nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
br_multicast_ngroups_get_max(&p->multicast_ctx)))
return -EMSGSIZE;
#endif
	/* we might be called only with br->lock held, so take RCU to
	 * dereference the backup port safely
	 */
rcu_read_lock();
backup_p = rcu_dereference(p->backup_port);
if (backup_p)
nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
backup_p->dev->ifindex);
rcu_read_unlock();
if (p->backup_nhid &&
nla_put_u32(skb, IFLA_BRPORT_BACKUP_NHID, p->backup_nhid))
return -EMSGSIZE;
return 0;
}
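/* Emit vlan info for a contiguous vid range. A real range is encoded as
 * two IFLA_BRIDGE_VLAN_INFO attributes: the first carries vid_start with
 * BRIDGE_VLAN_INFO_RANGE_BEGIN set, the second carries vid_end with
 * BRIDGE_VLAN_INFO_RANGE_END set. For example, untagged vids 10-19 become
 * {vid=10, flags=UNTAGGED|RANGE_BEGIN} followed by
 * {vid=19, flags=UNTAGGED|RANGE_END}. A single vid is emitted as one
 * attribute with no range flags.
 */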
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
u16 vid_end, u16 flags)
{
struct bridge_vlan_info vinfo;
if ((vid_end - vid_start) > 0) {
/* add range to skb */
vinfo.vid = vid_start;
vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
vinfo.vid = vid_end;
vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
} else {
vinfo.vid = vid_start;
vinfo.flags = flags;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *v;
u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
u16 flags, pvid;
int err = 0;
	/* Pack an IFLA_BRIDGE_VLAN_INFO for every vlan and mark the
	 * vlan info with begin and end flags when it represents a
	 * contiguous range
	 */
pvid = br_get_pvid(vg);
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
flags = 0;
if (!br_vlan_should_use(v))
continue;
if (v->vid == pvid)
flags |= BRIDGE_VLAN_INFO_PVID;
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_range_start == 0) {
goto initvars;
} else if ((v->vid - vid_range_end) == 1 &&
flags == vid_range_flags) {
vid_range_end = v->vid;
continue;
} else {
err = br_fill_ifvlaninfo_range(skb, vid_range_start,
vid_range_end,
vid_range_flags);
if (err)
return err;
}
initvars:
vid_range_start = v->vid;
vid_range_end = v->vid;
vid_range_flags = flags;
}
if (vid_range_start != 0) {
/* Call it once more to send any left over vlans */
err = br_fill_ifvlaninfo_range(skb, vid_range_start,
vid_range_end,
vid_range_flags);
if (err)
return err;
}
return 0;
}
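/* Non-compressed variant used for RTEXT_FILTER_BRVLAN: emit one
 * IFLA_BRIDGE_VLAN_INFO attribute per vlan, with per-entry PVID and
 * UNTAGGED flags, instead of folding consecutive vids into ranges.
 */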
static int br_fill_ifvlaninfo(struct sk_buff *skb,
struct net_bridge_vlan_group *vg)
{
struct bridge_vlan_info vinfo;
struct net_bridge_vlan *v;
u16 pvid;
pvid = br_get_pvid(vg);
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
vinfo.vid = v->vid;
vinfo.flags = 0;
if (v->vid == pvid)
vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
/*
* Create one netlink message for one interface
* Contains port and master info as well as carrier and bridge state.
*/
static int br_fill_ifinfo(struct sk_buff *skb,
const struct net_bridge_port *port,
u32 pid, u32 seq, int event, unsigned int flags,
u32 filter_mask, const struct net_device *dev,
bool getlink)
{
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
if (port)
br = port->br;
else
br = netdev_priv(dev);
br_debug(br, "br_fill_info event %d port %s master %s\n",
event, dev->name, br->dev->name);
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifi_family = AF_BRIDGE;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
hdr->ifi_index = dev->ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
(dev->ifindex != dev_get_iflink(dev) &&
nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
goto nla_put_failure;
if (event == RTM_NEWLINK && port) {
struct nlattr *nest;
nest = nla_nest_start(skb, IFLA_PROTINFO);
if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
}
if (filter_mask & (RTEXT_FILTER_BRVLAN |
RTEXT_FILTER_BRVLAN_COMPRESSED |
RTEXT_FILTER_MRP |
RTEXT_FILTER_CFM_CONFIG |
RTEXT_FILTER_CFM_STATUS |
RTEXT_FILTER_MST)) {
af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!af)
goto nla_put_failure;
}
/* Check if the VID information is requested */
if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
struct net_bridge_vlan_group *vg;
int err;
/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
rcu_read_lock();
if (port)
vg = nbp_vlan_group_rcu(port);
else
vg = br_vlan_group_rcu(br);
if (!vg || !vg->num_vlans) {
rcu_read_unlock();
goto done;
}
if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
err = br_fill_ifvlaninfo_compressed(skb, vg);
else
err = br_fill_ifvlaninfo(skb, vg);
if (port && (port->flags & BR_VLAN_TUNNEL))
err = br_fill_vlan_tunnel_info(skb, vg);
rcu_read_unlock();
if (err)
goto nla_put_failure;
}
if (filter_mask & RTEXT_FILTER_MRP) {
int err;
if (!br_mrp_enabled(br) || port)
goto done;
rcu_read_lock();
err = br_mrp_fill_info(skb, br);
rcu_read_unlock();
if (err)
goto nla_put_failure;
}
if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
struct nlattr *cfm_nest = NULL;
int err;
if (!br_cfm_created(br) || port)
goto done;
cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
if (!cfm_nest)
goto nla_put_failure;
if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
rcu_read_lock();
err = br_cfm_config_fill_info(skb, br);
rcu_read_unlock();
if (err)
goto nla_put_failure;
}
if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
rcu_read_lock();
err = br_cfm_status_fill_info(skb, br, getlink);
rcu_read_unlock();
if (err)
goto nla_put_failure;
}
nla_nest_end(skb, cfm_nest);
}
if ((filter_mask & RTEXT_FILTER_MST) &&
br_opt_get(br, BROPT_MST_ENABLED) && port) {
const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
struct nlattr *mst_nest;
int err;
if (!vg || !vg->num_vlans)
goto done;
mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
if (!mst_nest)
goto nla_put_failure;
err = br_mst_fill_info(skb, vg);
if (err)
goto nla_put_failure;
nla_nest_end(skb, mst_nest);
}
done:
if (af) {
if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
nla_nest_end(skb, af);
else
nla_nest_cancel(skb, af);
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
void br_info_notify(int event, const struct net_bridge *br,
const struct net_bridge_port *port, u32 filter)
{
struct net_device *dev;
struct sk_buff *skb;
int err = -ENOBUFS;
struct net *net;
u16 port_no = 0;
if (WARN_ON(!port && !br))
return;
if (port) {
dev = port->dev;
br = port->br;
port_no = port->port_no;
} else {
dev = br->dev;
}
net = dev_net(dev);
br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);
skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
if (err < 0) {
/* -EMSGSIZE implies BUG in br_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
const struct net_bridge_port *port)
{
u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
return br_info_notify(event, br, port, filter);
}
/*
* Dump information about all ports, in response to GETLINK
*/
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask, int nlflags)
{
struct net_bridge_port *port = br_port_get_rtnl(dev);
if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
!(filter_mask & RTEXT_FILTER_MRP) &&
!(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
!(filter_mask & RTEXT_FILTER_CFM_STATUS))
return 0;
return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
filter_mask, dev, true);
}
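/* Apply a single bridge_vlan_info entry. RTM_SETLINK adds the vlan on the
 * port (or as a bridge "brentry" when p is NULL), RTM_DELLINK removes it;
 * *changed is set only when the configuration actually changed.
 */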
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
int cmd, struct bridge_vlan_info *vinfo, bool *changed,
struct netlink_ext_ack *extack)
{
bool curr_change;
int err = 0;
switch (cmd) {
case RTM_SETLINK:
if (p) {
/* if the MASTER flag is set this will act on the global
* per-VLAN entry as well
*/
err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
&curr_change, extack);
} else {
vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
err = br_vlan_add(br, vinfo->vid, vinfo->flags,
&curr_change, extack);
}
if (curr_change)
*changed = true;
break;
case RTM_DELLINK:
if (p) {
if (!nbp_vlan_delete(p, vinfo->vid))
*changed = true;
if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
!br_vlan_delete(p->br, vinfo->vid))
*changed = true;
} else if (!br_vlan_delete(br, vinfo->vid)) {
*changed = true;
}
break;
}
return err;
}
int br_process_vlan_info(struct net_bridge *br,
struct net_bridge_port *p, int cmd,
struct bridge_vlan_info *vinfo_curr,
struct bridge_vlan_info **vinfo_last,
bool *changed,
struct netlink_ext_ack *extack)
{
int err, rtm_cmd;
if (!br_vlan_valid_id(vinfo_curr->vid, extack))
return -EINVAL;
/* needed for vlan-only NEWVLAN/DELVLAN notifications */
rtm_cmd = br_afspec_cmd_to_rtm(cmd);
if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
return -EINVAL;
*vinfo_last = vinfo_curr;
return 0;
}
if (*vinfo_last) {
struct bridge_vlan_info tmp_vinfo;
int v, v_change_start = 0;
if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
return -EINVAL;
memcpy(&tmp_vinfo, *vinfo_last,
sizeof(struct bridge_vlan_info));
for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
bool curr_change = false;
tmp_vinfo.vid = v;
err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
extack);
if (err)
break;
if (curr_change) {
*changed = curr_change;
if (!v_change_start)
v_change_start = v;
} else {
/* nothing to notify yet */
if (!v_change_start)
continue;
br_vlan_notify(br, p, v_change_start,
v - 1, rtm_cmd);
v_change_start = 0;
}
cond_resched();
}
/* v_change_start is set only if the last/whole range changed */
if (v_change_start)
br_vlan_notify(br, p, v_change_start,
v - 1, rtm_cmd);
*vinfo_last = NULL;
return err;
}
err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
if (*changed)
br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);
return err;
}
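/* Walk the nested IFLA_AF_SPEC attributes of a SETLINK/DELLINK request and
 * dispatch them: vlan tunnel info, vlan info (possibly expressed as
 * begin/end ranges), MRP, CFM and MST sub-attributes.
 */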
static int br_afspec(struct net_bridge *br,
struct net_bridge_port *p,
struct nlattr *af_spec,
int cmd, bool *changed,
struct netlink_ext_ack *extack)
{
struct bridge_vlan_info *vinfo_curr = NULL;
struct bridge_vlan_info *vinfo_last = NULL;
struct nlattr *attr;
struct vtunnel_info tinfo_last = {};
struct vtunnel_info tinfo_curr = {};
int err = 0, rem;
nla_for_each_nested(attr, af_spec, rem) {
err = 0;
switch (nla_type(attr)) {
case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
if (!p || !(p->flags & BR_VLAN_TUNNEL))
return -EINVAL;
err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
if (err)
return err;
err = br_process_vlan_tunnel_info(br, p, cmd,
&tinfo_curr,
&tinfo_last,
changed);
if (err)
return err;
break;
case IFLA_BRIDGE_VLAN_INFO:
if (nla_len(attr) != sizeof(struct bridge_vlan_info))
return -EINVAL;
vinfo_curr = nla_data(attr);
err = br_process_vlan_info(br, p, cmd, vinfo_curr,
&vinfo_last, changed,
extack);
if (err)
return err;
break;
case IFLA_BRIDGE_MRP:
err = br_mrp_parse(br, p, attr, cmd, extack);
if (err)
return err;
break;
case IFLA_BRIDGE_CFM:
err = br_cfm_parse(br, p, attr, cmd, extack);
if (err)
return err;
break;
case IFLA_BRIDGE_MST:
if (!p) {
NL_SET_ERR_MSG(extack,
"MST states can only be set on bridge ports");
return -EINVAL;
}
if (cmd != RTM_SETLINK) {
NL_SET_ERR_MSG(extack,
"MST states can only be set through RTM_SETLINK");
return -EINVAL;
}
err = br_mst_process(p, attr, extack);
if (err)
return err;
break;
}
}
return err;
}
static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_UNSPEC] = { .strict_start_type =
IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + 1 },
[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
[IFLA_BRPORT_COST] = { .type = NLA_U32 },
[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
[IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
[IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
[IFLA_BRPORT_MAB] = { .type = NLA_U8 },
[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
[IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT },
[IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
[IFLA_BRPORT_NEIGH_VLAN_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_BRPORT_BACKUP_NHID] = { .type = NLA_U32 },
};
/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
if (state > BR_STATE_BLOCKING)
return -EINVAL;
/* if kernel STP is running, don't allow changes */
if (p->br->stp_enabled == BR_KERNEL_STP)
return -EBUSY;
/* if device is not up, change is not allowed
* if link is not present, only allowable state is disabled
*/
if (!netif_running(p->dev) ||
(!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
return -ENETDOWN;
br_set_state(p, state);
br_port_state_selection(p->br);
return 0;
}
/* Set or clear port flags based on the attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
int attrtype, unsigned long mask)
{
if (!tb[attrtype])
return;
if (nla_get_u8(tb[attrtype]))
p->flags |= mask;
else
p->flags &= ~mask;
}
/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
unsigned long old_flags, changed_mask;
bool br_vlan_tunnel_old;
int err;
old_flags = p->flags;
br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;
br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
BR_MULTICAST_FAST_LEAVE);
br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
BR_MULTICAST_TO_UNICAST);
br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);
br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
BR_NEIGH_VLAN_SUPPRESS);
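	/* MAB only makes sense on a locked port with learning enabled, so
	 * that learned addresses can be created as locked FDB entries and
	 * reported to user-space; when MAB is turned off, flush the locked
	 * entries that were created on its behalf.
	 */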
if ((p->flags & BR_PORT_MAB) &&
(!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
NL_SET_ERR_MSG(extack, "Bridge port must be locked and have learning enabled when MAB is enabled");
p->flags = old_flags;
return -EINVAL;
} else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
struct net_bridge_fdb_flush_desc desc = {
.flags = BIT(BR_FDB_LOCKED),
.flags_mask = BIT(BR_FDB_LOCKED),
.port_ifindex = p->dev->ifindex,
};
br_fdb_flush(p->br, &desc);
}
changed_mask = old_flags ^ p->flags;
err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
if (err) {
p->flags = old_flags;
return err;
}
if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
nbp_vlan_tunnel_info_flush(p);
br_port_flags_change(p, changed_mask);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
if (err)
return err;
}
if (tb[IFLA_BRPORT_PRIORITY]) {
err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
if (err)
return err;
}
if (tb[IFLA_BRPORT_STATE]) {
err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
if (err)
return err;
}
if (tb[IFLA_BRPORT_FLUSH])
br_fdb_delete_by_port(p->br, p, 0, 0);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
err = br_multicast_set_port_router(&p->multicast_ctx,
mcast_router);
if (err)
return err;
}
if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
u32 hlimit;
hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
err = br_multicast_eht_set_hosts_limit(p, hlimit);
if (err)
return err;
}
if (tb[IFLA_BRPORT_MCAST_MAX_GROUPS]) {
u32 max_groups;
max_groups = nla_get_u32(tb[IFLA_BRPORT_MCAST_MAX_GROUPS]);
br_multicast_ngroups_set_max(&p->multicast_ctx, max_groups);
}
#endif
if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
if (fwd_mask & BR_GROUPFWD_MACPAUSE)
return -EINVAL;
p->group_fwd_mask = fwd_mask;
}
if (tb[IFLA_BRPORT_BACKUP_PORT]) {
struct net_device *backup_dev = NULL;
u32 backup_ifindex;
backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
if (backup_ifindex) {
backup_dev = __dev_get_by_index(dev_net(p->dev),
backup_ifindex);
if (!backup_dev)
return -ENOENT;
}
err = nbp_backup_change(p, backup_dev);
if (err)
return err;
}
if (tb[IFLA_BRPORT_BACKUP_NHID]) {
u32 backup_nhid = nla_get_u32(tb[IFLA_BRPORT_BACKUP_NHID]);
WRITE_ONCE(p->backup_nhid, backup_nhid);
}
return 0;
}
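/* br_setlink() handles AF_BRIDGE RTM_SETLINK requests. The message may
 * carry an IFLA_PROTINFO nest with per-port options (parsed against
 * br_port_policy) and/or an IFLA_AF_SPEC nest with vlan and other
 * AF-specific configuration; iproute2's "bridge link set" and
 * "bridge vlan add" commands typically end up here.
 */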
/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
struct nlattr *tb[IFLA_BRPORT_MAX + 1];
struct net_bridge_port *p;
struct nlattr *protinfo;
struct nlattr *afspec;
bool changed = false;
int err = 0;
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!protinfo && !afspec)
return 0;
p = br_port_get_rtnl(dev);
	/* We also accept dev being the bridge itself when AF_SPEC is
	 * set, in case someone is setting vlan info on the bridge device
	 */
if (!p && !afspec)
return -EINVAL;
if (p && protinfo) {
if (protinfo->nla_type & NLA_F_NESTED) {
err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
protinfo,
br_port_policy,
NULL);
if (err)
return err;
spin_lock_bh(&p->br->lock);
err = br_setport(p, tb, extack);
spin_unlock_bh(&p->br->lock);
} else {
/* Binary compatibility with old RSTP */
if (nla_len(protinfo) < sizeof(u8))
return -EINVAL;
spin_lock_bh(&p->br->lock);
err = br_set_port_state(p, nla_get_u8(protinfo));
spin_unlock_bh(&p->br->lock);
}
if (err)
goto out;
changed = true;
}
if (afspec)
err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
if (changed)
br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
return err;
}
/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
struct net_bridge_port *p;
struct nlattr *afspec;
bool changed = false;
int err = 0;
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!afspec)
return 0;
p = br_port_get_rtnl(dev);
/* We want to accept dev as bridge itself as well */
if (!p && !netif_is_bridge_master(dev))
return -EINVAL;
err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
if (changed)
/* Send RTM_NEWLINK because userspace
* expects RTM_NEWLINK for vlan dels
*/
br_ifinfo_notify(RTM_NEWLINK, br, p);
return err;
}
static int br_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (!data)
return 0;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
if (data[IFLA_BR_VLAN_PROTOCOL] &&
!eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
return -EPROTONOSUPPORT;
if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
if (defpvid >= VLAN_VID_MASK)
return -EINVAL;
}
#endif
return 0;
}
static int br_port_slave_changelink(struct net_device *brdev,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(brdev);
int ret;
if (!data)
return 0;
spin_lock_bh(&br->lock);
ret = br_setport(br_port_get_rtnl(dev), data, extack);
spin_unlock_bh(&br->lock);
return ret;
}
static int br_port_fill_slave_info(struct sk_buff *skb,
const struct net_device *brdev,
const struct net_device *dev)
{
return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}
static size_t br_port_get_slave_size(const struct net_device *brdev,
const struct net_device *dev)
{
return br_port_info_size();
}
static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
[IFLA_BR_MULTI_BOOLOPT] =
NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
};
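/* Apply IFLA_BR_* attributes to a bridge device. This runs for netlink
 * changelink requests (e.g. "ip link set ... type bridge ...") and is also
 * called from br_dev_newlink() when the bridge is created.
 */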
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(brdev);
int err;
if (!data)
return 0;
if (data[IFLA_BR_FORWARD_DELAY]) {
err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
if (err)
return err;
}
if (data[IFLA_BR_HELLO_TIME]) {
err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
if (err)
return err;
}
if (data[IFLA_BR_MAX_AGE]) {
err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
if (err)
return err;
}
if (data[IFLA_BR_AGEING_TIME]) {
err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
if (err)
return err;
}
if (data[IFLA_BR_STP_STATE]) {
u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
err = br_stp_set_enabled(br, stp_enabled, extack);
if (err)
return err;
}
if (data[IFLA_BR_PRIORITY]) {
u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
br_stp_set_bridge_priority(br, priority);
}
if (data[IFLA_BR_VLAN_FILTERING]) {
u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
err = br_vlan_filter_toggle(br, vlan_filter, extack);
if (err)
return err;
}
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
if (data[IFLA_BR_VLAN_PROTOCOL]) {
__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
err = __br_vlan_set_proto(br, vlan_proto, extack);
if (err)
return err;
}
if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
err = __br_vlan_set_default_pvid(br, defpvid, extack);
if (err)
return err;
}
if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
err = br_vlan_set_stats(br, vlan_stats);
if (err)
return err;
}
if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
err = br_vlan_set_stats_per_port(br, per_port);
if (err)
return err;
}
#endif
if (data[IFLA_BR_GROUP_FWD_MASK]) {
u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
if (fwd_mask & BR_GROUPFWD_RESTRICTED)
return -EINVAL;
br->group_fwd_mask = fwd_mask;
}
if (data[IFLA_BR_GROUP_ADDR]) {
u8 new_addr[ETH_ALEN];
if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
return -EINVAL;
memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
if (!is_link_local_ether_addr(new_addr))
return -EINVAL;
if (new_addr[5] == 1 || /* 802.3x Pause address */
new_addr[5] == 2 || /* 802.3ad Slow protocols */
new_addr[5] == 3) /* 802.1X PAE address */
return -EINVAL;
spin_lock_bh(&br->lock);
memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
spin_unlock_bh(&br->lock);
br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
br_recalculate_fwd_mask(br);
}
if (data[IFLA_BR_FDB_FLUSH]) {
struct net_bridge_fdb_flush_desc desc = {
.flags_mask = BIT(BR_FDB_STATIC)
};
br_fdb_flush(br, &desc);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (data[IFLA_BR_MCAST_ROUTER]) {
u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
err = br_multicast_set_router(&br->multicast_ctx,
multicast_router);
if (err)
return err;
}
if (data[IFLA_BR_MCAST_SNOOPING]) {
u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
err = br_multicast_toggle(br, mcast_snooping, extack);
if (err)
return err;
}
if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
u8 val;
val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
}
if (data[IFLA_BR_MCAST_QUERIER]) {
u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
err = br_multicast_set_querier(&br->multicast_ctx,
mcast_querier);
if (err)
return err;
}
if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
RHT_ELASTICITY);
if (data[IFLA_BR_MCAST_HASH_MAX])
br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
br->multicast_ctx.multicast_last_member_count = val;
}
if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
br->multicast_ctx.multicast_startup_query_count = val;
}
if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
}
if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
}
if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
}
if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
br_multicast_set_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
}
if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
__u8 mcast_stats;
mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
}
if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
__u8 igmp_version;
igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
err = br_multicast_set_igmp_version(&br->multicast_ctx,
igmp_version);
if (err)
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
if (data[IFLA_BR_MCAST_MLD_VERSION]) {
__u8 mld_version;
mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
err = br_multicast_set_mld_version(&br->multicast_ctx,
mld_version);
if (err)
return err;
}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (data[IFLA_BR_NF_CALL_IPTABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
}
if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
}
if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
}
#endif
if (data[IFLA_BR_MULTI_BOOLOPT]) {
struct br_boolopt_multi *bm;
bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
err = br_boolopt_multi_toggle(br, bm, extack);
if (err)
return err;
}
return 0;
}
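/* Create a new bridge: register the netdevice first, optionally seed the
 * bridge id from IFLA_ADDRESS, then apply the remaining attributes via
 * br_changelink(); on failure the half-initialised device is deleted.
 */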
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(dev);
int err;
err = register_netdevice(dev);
if (err)
return err;
if (tb[IFLA_ADDRESS]) {
spin_lock_bh(&br->lock);
br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
spin_unlock_bh(&br->lock);
}
err = br_changelink(dev, tb, data, extack);
if (err)
br_dev_delete(dev, NULL);
return err;
}
static size_t br_get_size(const struct net_device *brdev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */
nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */
nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_STATS_ENABLED */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_IGMP_VERSION */
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_MLD_VERSION */
br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
#endif
nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
0;
}
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
struct net_bridge *br = netdev_priv(brdev);
u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
u32 hello_time = jiffies_to_clock_t(br->hello_time);
u32 age_time = jiffies_to_clock_t(br->max_age);
u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
u8 vlan_enabled = br_vlan_enabled(br->dev);
struct br_boolopt_multi bm;
u64 clockval;
clockval = br_timer_value(&br->hello_timer);
if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->tcn_timer);
if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->topology_change_timer);
if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->gc_work.timer);
if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
br_boolopt_multi_get(br, &bm);
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
&br->bridge_id) ||
nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
&br->designated_root) ||
nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
br->topology_change_detected) ||
nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
br->multicast_ctx.multicast_router) ||
nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
br->multicast_ctx.multicast_querier) ||
nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
br->multicast_ctx.multicast_last_member_count) ||
nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
br->multicast_ctx.multicast_startup_query_count) ||
nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
br->multicast_ctx.multicast_igmp_version) ||
br_multicast_dump_querier_state(skb, &br->multicast_ctx,
IFLA_BR_MCAST_QUERIER_STATE))
return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
br->multicast_ctx.multicast_mld_version))
return -EMSGSIZE;
#endif
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
return -EMSGSIZE;
#endif
return 0;
}
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
struct net_bridge_port *p = NULL;
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct net_bridge *br;
int numvls = 0;
switch (attr) {
case IFLA_STATS_LINK_XSTATS:
br = netdev_priv(dev);
vg = br_vlan_group(br);
break;
case IFLA_STATS_LINK_XSTATS_SLAVE:
p = br_port_get_rtnl(dev);
if (!p)
return 0;
vg = nbp_vlan_group(p);
break;
default:
return 0;
}
if (vg) {
/* we need to count all, even placeholder entries */
list_for_each_entry(v, &vg->vlan_list, vlist)
numvls++;
}
return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
(p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
nla_total_size(0);
}
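/* Fill the LINK_XSTATS_TYPE_BRIDGE nest: per-vlan counters, multicast
 * statistics and (for ports) STP xstats. *prividx is the resume point for
 * partial dumps: on -EMSGSIZE it records how far we got so that the next
 * dump pass can continue from that entry, and it is reset to 0 on success.
 */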
static int br_fill_linkxstats(struct sk_buff *skb,
const struct net_device *dev,
int *prividx, int attr)
{
struct nlattr *nla __maybe_unused;
struct net_bridge_port *p = NULL;
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct net_bridge *br;
struct nlattr *nest;
int vl_idx = 0;
switch (attr) {
case IFLA_STATS_LINK_XSTATS:
br = netdev_priv(dev);
vg = br_vlan_group(br);
break;
case IFLA_STATS_LINK_XSTATS_SLAVE:
p = br_port_get_rtnl(dev);
if (!p)
return 0;
br = p->br;
vg = nbp_vlan_group(p);
break;
default:
return -EINVAL;
}
nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
if (!nest)
return -EMSGSIZE;
if (vg) {
u16 pvid;
pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
struct bridge_vlan_xstats vxi;
struct pcpu_sw_netstats stats;
if (++vl_idx < *prividx)
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
vxi.flags = v->flags;
if (v->vid == pvid)
vxi.flags |= BRIDGE_VLAN_INFO_PVID;
br_vlan_get_stats(v, &stats);
vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
vxi.rx_packets = u64_stats_read(&stats.rx_packets);
vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
vxi.tx_packets = u64_stats_read(&stats.tx_packets);
if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
goto nla_put_failure;
}
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (++vl_idx >= *prividx) {
nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
sizeof(struct br_mcast_stats),
BRIDGE_XSTATS_PAD);
if (!nla)
goto nla_put_failure;
br_multicast_get_stats(br, p, nla_data(nla));
}
#endif
if (p) {
nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
sizeof(p->stp_xstats),
BRIDGE_XSTATS_PAD);
if (!nla)
goto nla_put_failure;
spin_lock_bh(&br->lock);
memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
spin_unlock_bh(&br->lock);
}
nla_nest_end(skb, nest);
*prividx = 0;
return 0;
nla_put_failure:
nla_nest_end(skb, nest);
*prividx = vl_idx;
return -EMSGSIZE;
}
static struct rtnl_af_ops br_af_ops __read_mostly = {
.family = AF_BRIDGE,
.get_link_af_size = br_get_link_af_size_filtered,
};
struct rtnl_link_ops br_link_ops __read_mostly = {
.kind = "bridge",
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
.maxtype = IFLA_BR_MAX,
.policy = br_policy,
.validate = br_validate,
.newlink = br_dev_newlink,
.changelink = br_changelink,
.dellink = br_dev_delete,
.get_size = br_get_size,
.fill_info = br_fill_info,
.fill_linkxstats = br_fill_linkxstats,
.get_linkxstats_size = br_get_linkxstats_size,
.slave_maxtype = IFLA_BRPORT_MAX,
.slave_policy = br_port_policy,
.slave_changelink = br_port_slave_changelink,
.get_slave_size = br_port_get_slave_size,
.fill_slave_info = br_port_fill_slave_info,
};
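/* Register the AF_BRIDGE rtnl_af_ops and the "bridge" rtnl_link_ops so the
 * bridge can be created and configured over rtnetlink; br_netlink_fini()
 * tears this down again.
 */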
int __init br_netlink_init(void)
{
int err;
br_vlan_rtnl_init();
rtnl_af_register(&br_af_ops);
err = rtnl_link_register(&br_link_ops);
if (err)
goto out_af;
return 0;
out_af:
rtnl_af_unregister(&br_af_ops);
return err;
}
void br_netlink_fini(void)
{
br_vlan_rtnl_uninit();
rtnl_af_unregister(&br_af_ops);
rtnl_link_unregister(&br_link_ops);
}
| linux-master | net/bridge/br_netlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bridge multicast support.
*
* Copyright (c) 2010 Herbert Xu <[email protected]>
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>
#include "br_private.h"
#include "br_private_mcast_eht.h"
static const struct rhashtable_params br_mdb_rht_params = {
.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
.key_len = sizeof(struct br_ip),
.automatic_shrinking = true,
};
static const struct rhashtable_params br_sg_port_rht_params = {
.head_offset = offsetof(struct net_bridge_port_group, rhnode),
.key_offset = offsetof(struct net_bridge_port_group, key),
.key_len = sizeof(struct net_bridge_port_group_sg_key),
.automatic_shrinking = true,
};
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
__be32 group,
__u16 vid,
const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
const struct in6_addr *group,
__u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct br_ip *group,
const unsigned char *src,
u8 filter_mode,
bool igmpv2_mldv1,
bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
static int br_mc_disabled_update(struct net_device *dev, bool value,
struct netlink_ext_ack *extack);
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
struct net_bridge_port_group_sg_key *sg_p)
{
lockdep_assert_held_once(&br->multicast_lock);
return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
br_sg_port_rht_params);
}
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
struct br_ip *dst)
{
return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
struct br_ip *dst)
{
struct net_bridge_mdb_entry *ent;
lockdep_assert_held_once(&br->multicast_lock);
rcu_read_lock();
ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
rcu_read_unlock();
return ent;
}
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
__be32 dst, __u16 vid)
{
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
br_dst.dst.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
br_dst.vid = vid;
return br_mdb_ip_get(br, &br_dst);
}
#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
const struct in6_addr *dst,
__u16 vid)
{
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
br_dst.dst.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
br_dst.vid = vid;
return br_mdb_ip_get(br, &br_dst);
}
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
struct sk_buff *skb, u16 vid)
{
struct net_bridge *br = brmctx->br;
struct br_ip ip;
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
br_multicast_ctx_vlan_global_disabled(brmctx))
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
return NULL;
memset(&ip, 0, sizeof(ip));
ip.proto = skb->protocol;
ip.vid = vid;
switch (skb->protocol) {
case htons(ETH_P_IP):
ip.dst.ip4 = ip_hdr(skb)->daddr;
if (brmctx->multicast_igmp_version == 3) {
struct net_bridge_mdb_entry *mdb;
ip.src.ip4 = ip_hdr(skb)->saddr;
mdb = br_mdb_ip_get_rcu(br, &ip);
if (mdb)
return mdb;
ip.src.ip4 = 0;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
ip.dst.ip6 = ipv6_hdr(skb)->daddr;
if (brmctx->multicast_mld_version == 2) {
struct net_bridge_mdb_entry *mdb;
ip.src.ip6 = ipv6_hdr(skb)->saddr;
mdb = br_mdb_ip_get_rcu(br, &ip);
if (mdb)
return mdb;
memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
}
break;
#endif
default:
ip.proto = 0;
ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
}
return br_mdb_ip_get_rcu(br, &ip);
}
/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. from a timer) and only for read-only purposes. Because
 * the vlan snooping option can change, it may return either context
 * (non-vlan or vlan). Its intended purpose is to read timer values from the
 * *current* context based on that option. At worst this can lead to
 * inconsistent timers when the contexts change, e.g. a src timer that needs
 * to re-arm with a specific delay taken from the old context.
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
struct net_bridge_vlan *vlan;
lockdep_assert_held_once(&pg->key.port->br->multicast_lock);
/* if vlan snooping is disabled use the port's multicast context */
if (!pg->key.addr.vid ||
!br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
goto out;
	/* Locking is tricky here: due to the different rules for multicast
	 * and vlans we need RCU to find the vlan and check that it has the
	 * BR_VLFLAG_MCAST_ENABLED flag set. That flag can only change under
	 * multicast_lock, which must already be held here, so the vlan's
	 * pmctx can safely be used on return.
	 */
rcu_read_lock();
vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
pmctx = &vlan->port_mcast_ctx;
else
pmctx = NULL;
rcu_read_unlock();
out:
return pmctx;
}
static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
struct net_bridge_mcast_port *pmctx = NULL;
struct net_bridge_vlan *vlan;
lockdep_assert_held_once(&port->br->multicast_lock);
if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
return NULL;
/* Take RCU to access the vlan. */
rcu_read_lock();
vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
pmctx = &vlan->port_mcast_ctx;
rcu_read_unlock();
return pmctx;
}
/* when snooping we need to check if the contexts should be used
* in the following order:
* - if pmctx is non-NULL (port), check if it should be used
* - if pmctx is NULL (bridge), check if brmctx should be used
*/
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
const struct net_bridge_mcast_port *pmctx)
{
if (!netif_running(brmctx->br->dev))
return false;
if (pmctx)
return !br_multicast_port_ctx_state_disabled(pmctx);
else
return !br_multicast_ctx_vlan_disabled(brmctx);
}
static bool br_port_group_equal(struct net_bridge_port_group *p,
struct net_bridge_port *port,
const unsigned char *src)
{
if (p->key.port != port)
return false;
if (!(port->flags & BR_MULTICAST_TO_UNICAST))
return true;
return ether_addr_equal(src, p->eth_addr);
}
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg,
struct br_ip *sg_ip)
{
struct net_bridge_port_group_sg_key sg_key;
struct net_bridge_port_group *src_pg;
struct net_bridge_mcast *brmctx;
memset(&sg_key, 0, sizeof(sg_key));
brmctx = br_multicast_port_ctx_get_global(pmctx);
sg_key.port = pg->key.port;
sg_key.addr = *sg_ip;
if (br_sg_port_find(brmctx->br, &sg_key))
return;
src_pg = __br_multicast_add_group(brmctx, pmctx,
sg_ip, pg->eth_addr,
MCAST_INCLUDE, false, false);
if (IS_ERR_OR_NULL(src_pg) ||
src_pg->rt_protocol != RTPROT_KERNEL)
return;
src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
struct br_ip *sg_ip)
{
struct net_bridge_port_group_sg_key sg_key;
struct net_bridge *br = pg->key.port->br;
struct net_bridge_port_group *src_pg;
memset(&sg_key, 0, sizeof(sg_key));
sg_key.port = pg->key.port;
sg_key.addr = *sg_ip;
src_pg = br_sg_port_find(br, &sg_key);
if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
src_pg->rt_protocol != RTPROT_KERNEL)
return;
br_multicast_find_del_pg(br, src_pg);
}
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
* to all other ports' S,G entries which are not blocked by the current group
* for proper replication, the assumption is that any S,G blocked entries
* are already added so the S,G,port lookup should skip them.
* When a port group transitions from EXCLUDE -> INCLUDE mode or is being
* deleted we need to remove it from all ports' S,G entries where it was
* automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
*/
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
u8 filter_mode)
{
struct net_bridge *br = pg->key.port->br;
struct net_bridge_port_group *pg_lst;
struct net_bridge_mcast_port *pmctx;
struct net_bridge_mdb_entry *mp;
struct br_ip sg_ip;
if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
return;
mp = br_mdb_ip_get(br, &pg->key.addr);
if (!mp)
return;
pmctx = br_multicast_pg_to_port_ctx(pg);
if (!pmctx)
return;
memset(&sg_ip, 0, sizeof(sg_ip));
sg_ip = pg->key.addr;
for (pg_lst = mlock_dereference(mp->ports, br);
pg_lst;
pg_lst = mlock_dereference(pg_lst->next, br)) {
struct net_bridge_group_src *src_ent;
if (pg_lst == pg)
continue;
hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
continue;
sg_ip.src = src_ent->addr.src;
switch (filter_mode) {
case MCAST_INCLUDE:
__fwd_del_star_excl(pg, &sg_ip);
break;
case MCAST_EXCLUDE:
__fwd_add_star_excl(pmctx, pg, &sg_ip);
break;
}
}
}
}
/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
struct net_bridge_port_group *sg)
{
struct net_bridge_mdb_entry *sg_mp;
if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
return;
if (!star_mp->host_joined)
return;
sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
if (!sg_mp)
return;
sg_mp->host_joined = true;
}
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
struct net_bridge *br = star_mp->br;
struct net_bridge_mdb_entry *sg_mp;
struct net_bridge_port_group *pg;
struct br_ip sg_ip;
if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
return;
memset(&sg_ip, 0, sizeof(sg_ip));
sg_ip = star_mp->addr;
for (pg = mlock_dereference(star_mp->ports, br);
pg;
pg = mlock_dereference(pg->next, br)) {
struct net_bridge_group_src *src_ent;
hlist_for_each_entry(src_ent, &pg->src_list, node) {
if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
continue;
sg_ip.src = src_ent->addr.src;
sg_mp = br_mdb_ip_get(br, &sg_ip);
if (!sg_mp)
continue;
sg_mp->host_joined = star_mp->host_joined;
}
}
}
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
/* *,G exclude ports are only added to S,G entries */
if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
return;
	/* we need the STAR_EXCLUDE ports only if there are non-STAR_EXCLUDE
	 * ports; permanent entries are ignored since they're managed by
	 * user-space
	 */
for (pp = &sgmp->ports;
(p = mlock_dereference(*pp, sgmp->br)) != NULL;
pp = &p->next)
if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
MDB_PG_FLAGS_PERMANENT)))
return;
/* currently the host can only have joined the *,G which means
* we treat it as EXCLUDE {}, so for an S,G it's considered a
* STAR_EXCLUDE entry and we can safely leave it
*/
sgmp->host_joined = false;
for (pp = &sgmp->ports;
(p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
br_multicast_del_pg(sgmp, p, pp);
else
pp = &p->next;
}
}
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
struct net_bridge_port_group *sg)
{
struct net_bridge_port_group_sg_key sg_key;
struct net_bridge *br = star_mp->br;
struct net_bridge_mcast_port *pmctx;
struct net_bridge_port_group *pg;
struct net_bridge_mcast *brmctx;
if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
return;
if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
return;
br_multicast_sg_host_state(star_mp, sg);
memset(&sg_key, 0, sizeof(sg_key));
sg_key.addr = sg->key.addr;
/* we need to add all exclude ports to the S,G */
for (pg = mlock_dereference(star_mp->ports, br);
pg;
pg = mlock_dereference(pg->next, br)) {
struct net_bridge_port_group *src_pg;
if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
continue;
sg_key.port = pg->key.port;
if (br_sg_port_find(br, &sg_key))
continue;
pmctx = br_multicast_pg_to_port_ctx(pg);
if (!pmctx)
continue;
brmctx = br_multicast_port_ctx_get_global(pmctx);
src_pg = __br_multicast_add_group(brmctx, pmctx,
&sg->key.addr,
sg->eth_addr,
MCAST_INCLUDE, false, false);
if (IS_ERR_OR_NULL(src_pg) ||
src_pg->rt_protocol != RTPROT_KERNEL)
continue;
src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
}
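/* Install the S,G entry for a source and mark it BR_SGRP_F_INSTALLED. For
 * kernel-managed entries the S,G timer is stopped and the *,G EXCLUDE ports
 * are added via br_multicast_sg_add_exclude_ports().
 */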
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
struct net_bridge_mdb_entry *star_mp;
struct net_bridge_mcast_port *pmctx;
struct net_bridge_port_group *sg;
struct net_bridge_mcast *brmctx;
struct br_ip sg_ip;
if (src->flags & BR_SGRP_F_INSTALLED)
return;
memset(&sg_ip, 0, sizeof(sg_ip));
pmctx = br_multicast_pg_to_port_ctx(src->pg);
if (!pmctx)
return;
brmctx = br_multicast_port_ctx_get_global(pmctx);
sg_ip = src->pg->key.addr;
sg_ip.src = src->addr.src;
sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
src->pg->eth_addr, MCAST_INCLUDE, false,
!timer_pending(&src->timer));
if (IS_ERR_OR_NULL(sg))
return;
src->flags |= BR_SGRP_F_INSTALLED;
sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
/* if it was added by user-space as perm we can skip next steps */
if (sg->rt_protocol != RTPROT_KERNEL &&
(sg->flags & MDB_PG_FLAGS_PERMANENT))
return;
/* the kernel is now responsible for removing this S,G */
del_timer(&sg->timer);
star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
if (!star_mp)
return;
br_multicast_sg_add_exclude_ports(star_mp, sg);
}
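/* Remove the S,G port entry installed for this source and clear its
 * BR_SGRP_F_INSTALLED flag; user-added permanent entries are left in place
 * unless the source itself was user-added.
 */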
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
bool fastleave)
{
struct net_bridge_port_group *p, *pg = src->pg;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_entry *mp;
struct br_ip sg_ip;
memset(&sg_ip, 0, sizeof(sg_ip));
sg_ip = pg->key.addr;
sg_ip.src = src->addr.src;
mp = br_mdb_ip_get(src->br, &sg_ip);
if (!mp)
return;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, src->br)) != NULL;
pp = &p->next) {
if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
continue;
if (p->rt_protocol != RTPROT_KERNEL &&
(p->flags & MDB_PG_FLAGS_PERMANENT) &&
!(src->flags & BR_SGRP_F_USER_ADDED))
break;
if (fastleave)
p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
br_multicast_del_pg(mp, p, pp);
break;
}
src->flags &= ~BR_SGRP_F_INSTALLED;
}
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
struct net_bridge_port_group_sg_key sg_key;
struct net_bridge_port_group *sg;
u8 old_flags;
br_multicast_fwd_src_add(src);
memset(&sg_key, 0, sizeof(sg_key));
sg_key.addr = src->pg->key.addr;
sg_key.addr.src = src->addr.src;
sg_key.port = src->pg->key.port;
sg = br_sg_port_find(src->br, &sg_key);
if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
return;
old_flags = sg->flags;
if (timer_pending(&src->timer))
sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
else
sg->flags |= MDB_PG_FLAGS_BLOCKED;
if (old_flags != sg->flags) {
struct net_bridge_mdb_entry *sg_mp;
sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
if (!sg_mp)
return;
br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
}
}
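/* Final destruction of an MDB entry, run from the deferred GC work after the
 * entry has been unhashed and all of its port groups are gone.
 */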
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_mdb_entry *mp;
mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
WARN_ON(!hlist_unhashed(&mp->mdb_node));
WARN_ON(mp->ports);
timer_shutdown_sync(&mp->timer);
kfree_rcu(mp, rcu);
}
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
struct net_bridge *br = mp->br;
rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
br_mdb_rht_params);
hlist_del_init_rcu(&mp->mdb_node);
hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
queue_work(system_long_wq, &br->mcast_gc_work);
}
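/* MDB entry timer expiry: drop the host join and, if no port groups are
 * left, schedule the entry for deletion.
 */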
static void br_multicast_group_expired(struct timer_list *t)
{
struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
struct net_bridge *br = mp->br;
spin_lock(&br->multicast_lock);
if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
timer_pending(&mp->timer))
goto out;
br_multicast_host_leave(mp, true);
if (mp->ports)
goto out;
br_multicast_del_mdb_entry(mp);
out:
spin_unlock(&br->multicast_lock);
}
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_group_src *src;
src = container_of(gc, struct net_bridge_group_src, mcast_gc);
WARN_ON(!hlist_unhashed(&src->node));
timer_shutdown_sync(&src->timer);
kfree_rcu(src, rcu);
}
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
struct net_bridge *br = src->pg->key.port->br;
hlist_del_init_rcu(&src->node);
src->pg->src_ents--;
hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
queue_work(system_long_wq, &br->mcast_gc_work);
}
void br_multicast_del_group_src(struct net_bridge_group_src *src,
bool fastleave)
{
br_multicast_fwd_src_remove(src, fastleave);
__br_multicast_del_group_src(src);
}
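/* Per-port and per-port-VLAN MDB entry accounting, used to enforce the
 * mcast_max_groups limit.
 */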
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
struct netlink_ext_ack *extack,
const char *what)
{
u32 max = READ_ONCE(pmctx->mdb_max_entries);
u32 n = READ_ONCE(pmctx->mdb_n_entries);
if (max && n >= max) {
NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
what, n, max);
return -E2BIG;
}
WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
return 0;
}
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
u32 n = READ_ONCE(pmctx->mdb_n_entries);
WARN_ON_ONCE(n == 0);
WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
const struct br_ip *group,
struct netlink_ext_ack *extack)
{
struct net_bridge_mcast_port *pmctx;
int err;
lockdep_assert_held_once(&port->br->multicast_lock);
/* Always count on the port context. */
err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
"Port");
if (err) {
trace_br_mdb_full(port->dev, group);
return err;
}
/* Only count on the VLAN context if VID is given, and if snooping on
* that VLAN is enabled.
*/
if (!group->vid)
return 0;
pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
if (!pmctx)
return 0;
err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
if (err) {
trace_br_mdb_full(port->dev, group);
goto dec_one_out;
}
return 0;
dec_one_out:
br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
return err;
}
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
struct net_bridge_mcast_port *pmctx;
lockdep_assert_held_once(&port->br->multicast_lock);
if (vid) {
pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
if (pmctx)
br_multicast_port_ngroups_dec_one(pmctx);
}
br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}
u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
return READ_ONCE(pmctx->mdb_n_entries);
}
void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
WRITE_ONCE(pmctx->mdb_max_entries, max);
}
u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
return READ_ONCE(pmctx->mdb_max_entries);
}
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_port_group *pg;
pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
WARN_ON(!hlist_unhashed(&pg->mglist));
WARN_ON(!hlist_empty(&pg->src_list));
timer_shutdown_sync(&pg->rexmit_timer);
timer_shutdown_sync(&pg->timer);
kfree_rcu(pg, rcu);
}
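/* Unlink a port group from its MDB entry, delete all of its sources, notify
 * user-space and defer the actual freeing to the GC work. For an S,G entry
 * the remaining *,G EXCLUDE-only ports are cleaned up, for a *,G entry the
 * automatically installed S,G port entries are removed.
 */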
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
struct net_bridge_port_group __rcu **pp)
{
struct net_bridge *br = pg->key.port->br;
struct net_bridge_group_src *ent;
struct hlist_node *tmp;
rcu_assign_pointer(*pp, pg->next);
hlist_del_init(&pg->mglist);
br_multicast_eht_clean_sets(pg);
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
br_multicast_del_group_src(ent, false);
br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
if (!br_multicast_is_star_g(&mp->addr)) {
rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
br_sg_port_rht_params);
br_multicast_sg_del_exclude_ports(mp);
} else {
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
}
br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
queue_work(system_long_wq, &br->mcast_gc_work);
if (!mp->ports && !mp->host_joined && netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
}
static void br_multicast_find_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
mp = br_mdb_ip_get(br, &pg->key.addr);
if (WARN_ON(!mp))
return;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p != pg)
continue;
br_multicast_del_pg(mp, pg, pp);
return;
}
WARN_ON(1);
}
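/* Port group timer expiry: switch the group to INCLUDE mode, delete sources
 * whose timers have expired and drop the whole port group if no sources are
 * left.
 */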
static void br_multicast_port_group_expired(struct timer_list *t)
{
struct net_bridge_port_group *pg = from_timer(pg, t, timer);
struct net_bridge_group_src *src_ent;
struct net_bridge *br = pg->key.port->br;
struct hlist_node *tmp;
bool changed;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
goto out;
changed = !!(pg->filter_mode == MCAST_EXCLUDE);
pg->filter_mode = MCAST_INCLUDE;
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
if (!timer_pending(&src_ent->timer)) {
br_multicast_del_group_src(src_ent, false);
changed = true;
}
}
if (hlist_empty(&pg->src_list)) {
br_multicast_find_del_pg(br, pg);
} else if (changed) {
struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
if (changed && br_multicast_is_star_g(&pg->key.addr))
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
if (WARN_ON(!mp))
goto out;
br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
}
out:
spin_unlock(&br->multicast_lock);
}
static void br_multicast_gc(struct hlist_head *head)
{
struct net_bridge_mcast_gc *gcent;
struct hlist_node *tmp;
hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
hlist_del_init(&gcent->gc_node);
gcent->destroy(gcent);
}
}
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb)
{
struct net_bridge_vlan *vlan = NULL;
if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
vlan = pmctx->vlan;
else if (br_multicast_ctx_is_vlan(brmctx))
vlan = brmctx->vlan;
if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
u16 vlan_proto;
if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
return;
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
}
}
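/* Build an IGMPv2/v3 query skb. For IGMPv3 group-and-source specific queries
 * (with_srcs) only sources matching the over_lmqt selection that still have
 * pending retransmissions are included.
 */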
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg,
__be32 ip_dst, __be32 group,
bool with_srcs, bool over_lmqt,
u8 sflag, u8 *igmp_type,
bool *need_rexmit)
{
struct net_bridge_port *p = pg ? pg->key.port : NULL;
struct net_bridge_group_src *ent;
size_t pkt_size, igmp_hdr_size;
unsigned long now = jiffies;
struct igmpv3_query *ihv3;
void *csum_start = NULL;
__sum16 *csum = NULL;
struct sk_buff *skb;
struct igmphdr *ih;
struct ethhdr *eth;
unsigned long lmqt;
struct iphdr *iph;
u16 lmqt_srcs = 0;
igmp_hdr_size = sizeof(*ih);
if (brmctx->multicast_igmp_version == 3) {
igmp_hdr_size = sizeof(*ihv3);
if (pg && with_srcs) {
lmqt = now + (brmctx->multicast_last_member_interval *
brmctx->multicast_last_member_count);
hlist_for_each_entry(ent, &pg->src_list, node) {
if (over_lmqt == time_after(ent->timer.expires,
lmqt) &&
ent->src_query_rexmit_cnt > 0)
lmqt_srcs++;
}
if (!lmqt_srcs)
return NULL;
igmp_hdr_size += lmqt_srcs * sizeof(__be32);
}
}
pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
if ((p && pkt_size > p->dev->mtu) ||
pkt_size > brmctx->br->dev->mtu)
return NULL;
skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
if (!skb)
goto out;
__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
skb->protocol = htons(ETH_P_IP);
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
ip_eth_mc_map(ip_dst, eth->h_dest);
eth->h_proto = htons(ETH_P_IP);
skb_put(skb, sizeof(*eth));
skb_set_network_header(skb, skb->len);
iph = ip_hdr(skb);
iph->tot_len = htons(pkt_size - sizeof(*eth));
iph->version = 4;
iph->ihl = 6;
iph->tos = 0xc0;
iph->id = 0;
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->protocol = IPPROTO_IGMP;
iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
iph->daddr = ip_dst;
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
((u8 *)&iph[1])[3] = 0;
ip_send_check(iph);
skb_put(skb, 24);
skb_set_transport_header(skb, skb->len);
*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
switch (brmctx->multicast_igmp_version) {
case 2:
ih = igmp_hdr(skb);
ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
ih->code = (group ? brmctx->multicast_last_member_interval :
brmctx->multicast_query_response_interval) /
(HZ / IGMP_TIMER_SCALE);
ih->group = group;
ih->csum = 0;
csum = &ih->csum;
csum_start = (void *)ih;
break;
case 3:
ihv3 = igmpv3_query_hdr(skb);
ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
ihv3->code = (group ? brmctx->multicast_last_member_interval :
brmctx->multicast_query_response_interval) /
(HZ / IGMP_TIMER_SCALE);
ihv3->group = group;
ihv3->qqic = brmctx->multicast_query_interval / HZ;
ihv3->nsrcs = htons(lmqt_srcs);
ihv3->resv = 0;
ihv3->suppress = sflag;
ihv3->qrv = 2;
ihv3->csum = 0;
csum = &ihv3->csum;
csum_start = (void *)ihv3;
if (!pg || !with_srcs)
break;
lmqt_srcs = 0;
hlist_for_each_entry(ent, &pg->src_list, node) {
if (over_lmqt == time_after(ent->timer.expires,
lmqt) &&
ent->src_query_rexmit_cnt > 0) {
ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
ent->src_query_rexmit_cnt--;
if (need_rexmit && ent->src_query_rexmit_cnt)
*need_rexmit = true;
}
}
if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
kfree_skb(skb);
return NULL;
}
break;
}
if (WARN_ON(!csum || !csum_start)) {
kfree_skb(skb);
return NULL;
}
*csum = ip_compute_csum(csum_start, igmp_hdr_size);
skb_put(skb, igmp_hdr_size);
__skb_pull(skb, sizeof(*eth));
out:
return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
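/* IPv6 counterpart of the above: build an MLDv1/v2 query skb with the same
 * LLQT-based source selection for MLDv2 group-and-source specific queries.
 */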
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg,
const struct in6_addr *ip6_dst,
const struct in6_addr *group,
bool with_srcs, bool over_llqt,
u8 sflag, u8 *igmp_type,
bool *need_rexmit)
{
struct net_bridge_port *p = pg ? pg->key.port : NULL;
struct net_bridge_group_src *ent;
size_t pkt_size, mld_hdr_size;
unsigned long now = jiffies;
struct mld2_query *mld2q;
void *csum_start = NULL;
unsigned long interval;
__sum16 *csum = NULL;
struct ipv6hdr *ip6h;
struct mld_msg *mldq;
struct sk_buff *skb;
unsigned long llqt;
struct ethhdr *eth;
u16 llqt_srcs = 0;
u8 *hopopt;
mld_hdr_size = sizeof(*mldq);
if (brmctx->multicast_mld_version == 2) {
mld_hdr_size = sizeof(*mld2q);
if (pg && with_srcs) {
llqt = now + (brmctx->multicast_last_member_interval *
brmctx->multicast_last_member_count);
hlist_for_each_entry(ent, &pg->src_list, node) {
if (over_llqt == time_after(ent->timer.expires,
llqt) &&
ent->src_query_rexmit_cnt > 0)
llqt_srcs++;
}
if (!llqt_srcs)
return NULL;
mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
}
}
pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
if ((p && pkt_size > p->dev->mtu) ||
pkt_size > brmctx->br->dev->mtu)
return NULL;
skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
if (!skb)
goto out;
__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
skb->protocol = htons(ETH_P_IPV6);
/* Ethernet header */
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
skb_put(skb, sizeof(*eth));
/* IPv6 header + HbH option */
skb_set_network_header(skb, skb->len);
ip6h = ipv6_hdr(skb);
*(__force __be32 *)ip6h = htonl(0x60000000);
ip6h->payload_len = htons(8 + mld_hdr_size);
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
ip6h->daddr = *ip6_dst;
if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
&ip6h->daddr, 0, &ip6h->saddr)) {
kfree_skb(skb);
br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
return NULL;
}
br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
hopopt[1] = 0; /* length of HbH */
hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
hopopt[3] = 2; /* Length of RA Option */
hopopt[4] = 0; /* Type = 0x0000 (MLD) */
hopopt[5] = 0;
hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
skb_put(skb, sizeof(*ip6h) + 8);
/* ICMPv6 */
skb_set_transport_header(skb, skb->len);
interval = ipv6_addr_any(group) ?
brmctx->multicast_query_response_interval :
brmctx->multicast_last_member_interval;
*igmp_type = ICMPV6_MGM_QUERY;
switch (brmctx->multicast_mld_version) {
case 1:
mldq = (struct mld_msg *)icmp6_hdr(skb);
mldq->mld_type = ICMPV6_MGM_QUERY;
mldq->mld_code = 0;
mldq->mld_cksum = 0;
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
mldq->mld_reserved = 0;
mldq->mld_mca = *group;
csum = &mldq->mld_cksum;
csum_start = (void *)mldq;
break;
case 2:
mld2q = (struct mld2_query *)icmp6_hdr(skb);
mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
mld2q->mld2q_type = ICMPV6_MGM_QUERY;
mld2q->mld2q_code = 0;
mld2q->mld2q_cksum = 0;
mld2q->mld2q_resv1 = 0;
mld2q->mld2q_resv2 = 0;
mld2q->mld2q_suppress = sflag;
mld2q->mld2q_qrv = 2;
mld2q->mld2q_nsrcs = htons(llqt_srcs);
mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
mld2q->mld2q_mca = *group;
csum = &mld2q->mld2q_cksum;
csum_start = (void *)mld2q;
if (!pg || !with_srcs)
break;
llqt_srcs = 0;
hlist_for_each_entry(ent, &pg->src_list, node) {
if (over_llqt == time_after(ent->timer.expires,
llqt) &&
ent->src_query_rexmit_cnt > 0) {
mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
ent->src_query_rexmit_cnt--;
if (need_rexmit && ent->src_query_rexmit_cnt)
*need_rexmit = true;
}
}
if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
kfree_skb(skb);
return NULL;
}
break;
}
if (WARN_ON(!csum || !csum_start)) {
kfree_skb(skb);
return NULL;
}
*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
IPPROTO_ICMPV6,
csum_partial(csum_start, mld_hdr_size, 0));
skb_put(skb, mld_hdr_size);
__skb_pull(skb, sizeof(*eth));
out:
return skb;
}
#endif
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg,
struct br_ip *ip_dst,
struct br_ip *group,
bool with_srcs, bool over_lmqt,
u8 sflag, u8 *igmp_type,
bool *need_rexmit)
{
__be32 ip4_dst;
switch (group->proto) {
case htons(ETH_P_IP):
ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
ip4_dst, group->dst.ip4,
with_srcs, over_lmqt,
sflag, igmp_type,
need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6): {
struct in6_addr ip6_dst;
if (ip_dst)
ip6_dst = ip_dst->dst.ip6;
else
ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
htonl(1));
return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
&ip6_dst, &group->dst.ip6,
with_srcs, over_lmqt,
sflag, igmp_type,
need_rexmit);
}
#endif
}
return NULL;
}
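/* Look up or create the bridge-wide MDB entry for a group. If the MDB hash
 * table is full, multicast snooping is disabled for the whole bridge and
 * -E2BIG is returned.
 */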
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
int err;
mp = br_mdb_ip_get(br, group);
if (mp)
return mp;
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
trace_br_mdb_full(br->dev, group);
br_mc_disabled_update(br->dev, false, NULL);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
return ERR_PTR(-E2BIG);
}
mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
if (unlikely(!mp))
return ERR_PTR(-ENOMEM);
mp->br = br;
mp->addr = *group;
mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
timer_setup(&mp->timer, br_multicast_group_expired, 0);
err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
br_mdb_rht_params);
if (err) {
kfree(mp);
mp = ERR_PTR(err);
} else {
hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
}
return mp;
}
static void br_multicast_group_src_expired(struct timer_list *t)
{
struct net_bridge_group_src *src = from_timer(src, t, timer);
struct net_bridge_port_group *pg;
struct net_bridge *br = src->br;
spin_lock(&br->multicast_lock);
if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
timer_pending(&src->timer))
goto out;
pg = src->pg;
if (pg->filter_mode == MCAST_INCLUDE) {
br_multicast_del_group_src(src, false);
if (!hlist_empty(&pg->src_list))
goto out;
br_multicast_find_del_pg(br, pg);
} else {
br_multicast_fwd_src_handle(src);
}
out:
spin_unlock(&br->multicast_lock);
}
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
struct net_bridge_group_src *ent;
switch (ip->proto) {
case htons(ETH_P_IP):
hlist_for_each_entry(ent, &pg->src_list, node)
if (ip->src.ip4 == ent->addr.src.ip4)
return ent;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
hlist_for_each_entry(ent, &pg->src_list, node)
if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
return ent;
break;
#endif
}
return NULL;
}
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
struct net_bridge_group_src *grp_src;
if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
return NULL;
switch (src_ip->proto) {
case htons(ETH_P_IP):
if (ipv4_is_zeronet(src_ip->src.ip4) ||
ipv4_is_multicast(src_ip->src.ip4))
return NULL;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
if (ipv6_addr_any(&src_ip->src.ip6) ||
ipv6_addr_is_multicast(&src_ip->src.ip6))
return NULL;
break;
#endif
}
grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
if (unlikely(!grp_src))
return NULL;
grp_src->pg = pg;
grp_src->br = pg->key.port->br;
grp_src->addr = *src_ip;
grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
hlist_add_head_rcu(&grp_src->node, &pg->src_list);
pg->src_ents++;
return grp_src;
}
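/* Allocate and initialize a new port group entry, accounting it against the
 * port's (and port-VLAN's) group limits; S,G entries are also inserted into
 * the per-bridge S,G port hash table.
 */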
struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
const struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char flags,
const unsigned char *src,
u8 filter_mode,
u8 rt_protocol,
struct netlink_ext_ack *extack)
{
struct net_bridge_port_group *p;
int err;
err = br_multicast_port_ngroups_inc(port, group, extack);
if (err)
return NULL;
p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (unlikely(!p)) {
NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
goto dec_out;
}
p->key.addr = *group;
p->key.port = port;
p->flags = flags;
p->filter_mode = filter_mode;
p->rt_protocol = rt_protocol;
p->eht_host_tree = RB_ROOT;
p->eht_set_tree = RB_ROOT;
p->mcast_gc.destroy = br_multicast_destroy_port_group;
INIT_HLIST_HEAD(&p->src_list);
if (!br_multicast_is_star_g(group) &&
rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
br_sg_port_rht_params)) {
NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
goto free_out;
}
rcu_assign_pointer(p->next, next);
timer_setup(&p->timer, br_multicast_port_group_expired, 0);
timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
hlist_add_head(&p->mglist, &port->mglist);
if (src)
memcpy(p->eth_addr, src, ETH_ALEN);
else
eth_broadcast_addr(p->eth_addr);
return p;
free_out:
kfree(p);
dec_out:
br_multicast_port_ngroups_dec(port, group->vid);
return NULL;
}
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
struct net_bridge_port *port = p->key.port;
__u16 vid = p->key.addr.vid;
hlist_del_init(&p->mglist);
if (!br_multicast_is_star_g(&p->key.addr))
rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
br_sg_port_rht_params);
kfree(p);
br_multicast_port_ngroups_dec(port, vid);
}
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
struct net_bridge_mdb_entry *mp, bool notify)
{
if (!mp->host_joined) {
mp->host_joined = true;
if (br_multicast_is_star_g(&mp->addr))
br_multicast_star_g_host_state(mp);
if (notify)
br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
}
if (br_group_is_l2(&mp->addr))
return;
mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
if (!mp->host_joined)
return;
mp->host_joined = false;
if (br_multicast_is_star_g(&mp->addr))
br_multicast_star_g_host_state(mp);
if (notify)
br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
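/* Core add-group path, called with multicast_lock held: look up or create
 * the MDB entry and either mark it host-joined (no port context) or
 * find/create the port group entry. IGMPv2/MLDv1 joins also refresh the
 * membership timer.
 */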
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct br_ip *group,
const unsigned char *src,
u8 filter_mode,
bool igmpv2_mldv1,
bool blocked)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p = NULL;
struct net_bridge_mdb_entry *mp;
unsigned long now = jiffies;
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto out;
mp = br_multicast_new_group(brmctx->br, group);
if (IS_ERR(mp))
return ERR_CAST(mp);
if (!pmctx) {
br_multicast_host_join(brmctx, mp, true);
goto out;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
pp = &p->next) {
if (br_port_group_equal(p, pmctx->port, src))
goto found;
if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
break;
}
p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
filter_mode, RTPROT_KERNEL, NULL);
if (unlikely(!p)) {
p = ERR_PTR(-ENOMEM);
goto out;
}
rcu_assign_pointer(*pp, p);
if (blocked)
p->flags |= MDB_PG_FLAGS_BLOCKED;
br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);
found:
if (igmpv2_mldv1)
mod_timer(&p->timer,
now + brmctx->multicast_membership_interval);
out:
return p;
}
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct br_ip *group,
const unsigned char *src,
u8 filter_mode,
bool igmpv2_mldv1)
{
struct net_bridge_port_group *pg;
int err;
spin_lock(&brmctx->br->multicast_lock);
pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
igmpv2_mldv1, false);
/* NULL is considered valid for host joined groups */
err = PTR_ERR_OR_ZERO(pg);
spin_unlock(&brmctx->br->multicast_lock);
return err;
}
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
__be32 group,
__u16 vid,
const unsigned char *src,
bool igmpv2)
{
struct br_ip br_group;
u8 filter_mode;
if (ipv4_is_local_multicast(group))
return 0;
memset(&br_group, 0, sizeof(br_group));
br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
return br_multicast_add_group(brmctx, pmctx, &br_group, src,
filter_mode, igmpv2);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
const struct in6_addr *group,
__u16 vid,
const unsigned char *src,
bool mldv1)
{
struct br_ip br_group;
u8 filter_mode;
if (ipv6_addr_is_ll_all_nodes(group))
return 0;
memset(&br_group, 0, sizeof(br_group));
br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
return br_multicast_add_group(brmctx, pmctx, &br_group, src,
filter_mode, mldv1);
}
#endif
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
if (hlist_unhashed(rlist))
return false;
hlist_del_init_rcu(rlist);
return true;
}
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
return br_multicast_rport_del(&pmctx->ip4_rlist);
}
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
return false;
#endif
}
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
struct timer_list *t,
struct hlist_node *rlist)
{
struct net_bridge *br = pmctx->port->br;
bool del;
spin_lock(&br->multicast_lock);
if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
timer_pending(t))
goto out;
del = br_multicast_rport_del(rlist);
br_multicast_rport_del_notify(pmctx, del);
out:
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
ip4_mc_router_timer);
br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
ip6_mc_router_timer);
br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif
static void br_mc_router_state_change(struct net_bridge *p,
bool is_mc_router)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
.flags = SWITCHDEV_F_DEFER,
.u.mrouter = is_mc_router,
};
switchdev_port_attr_set(p->dev, &attr, NULL);
}
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
struct timer_list *timer)
{
spin_lock(&brmctx->br->multicast_lock);
if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
br_ip4_multicast_is_router(brmctx) ||
br_ip6_multicast_is_router(brmctx))
goto out;
br_mc_router_state_change(brmctx->br, false);
out:
spin_unlock(&brmctx->br->multicast_lock);
}
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip4_mc_router_timer);
br_multicast_local_router_expired(brmctx, t);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip6_mc_router_timer);
br_multicast_local_router_expired(brmctx, t);
}
#endif
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
struct bridge_mcast_own_query *query)
{
spin_lock(&brmctx->br->multicast_lock);
if (!netif_running(brmctx->br->dev) ||
br_multicast_ctx_vlan_global_disabled(brmctx) ||
!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
goto out;
br_multicast_start_querier(brmctx, query);
out:
spin_unlock(&brmctx->br->multicast_lock);
}
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip4_other_query.timer);
br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip6_other_query.timer);
br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
struct br_ip *ip,
struct sk_buff *skb)
{
if (ip->proto == htons(ETH_P_IP))
brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
else
brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
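/* Send a general, group specific or group-and-source specific query. A
 * group-and-source specific query with the suppress flag set is sent twice:
 * once for sources over the LMQT and once for those under it.
 */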
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg,
struct br_ip *ip_dst,
struct br_ip *group,
bool with_srcs,
u8 sflag,
bool *need_rexmit)
{
bool over_lmqt = !!sflag;
struct sk_buff *skb;
u8 igmp_type;
if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
!br_multicast_ctx_matches_vlan_snooping(brmctx))
return;
again_under_lmqt:
skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
with_srcs, over_lmqt, sflag, &igmp_type,
need_rexmit);
if (!skb)
return;
if (pmctx) {
skb->dev = pmctx->port->dev;
br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
BR_MCAST_DIR_TX);
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
if (over_lmqt && with_srcs && sflag) {
over_lmqt = false;
goto again_under_lmqt;
}
} else {
br_multicast_select_own_querier(brmctx, group, skb);
br_multicast_count(brmctx->br, NULL, skb, igmp_type,
BR_MCAST_DIR_RX);
netif_rx(skb);
}
}
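/* Snapshot the querier address and port ifindex under its seqcount so
 * readers never observe a torn update.
 */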
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
struct bridge_mcast_querier *dest)
{
unsigned int seq;
memset(dest, 0, sizeof(*dest));
do {
seq = read_seqcount_begin(&querier->seq);
dest->port_ifidx = querier->port_ifidx;
memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
} while (read_seqcount_retry(&querier->seq, seq));
}
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
struct bridge_mcast_querier *querier,
int ifindex,
struct br_ip *saddr)
{
write_seqcount_begin(&querier->seq);
querier->port_ifidx = ifindex;
memcpy(&querier->addr, saddr, sizeof(*saddr));
write_seqcount_end(&querier->seq);
}
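/* Send our own general query if querying is enabled and no other querier is
 * currently active, then re-arm the own query timer (startup interval while
 * startup queries remain, regular query interval afterwards).
 */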
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct bridge_mcast_own_query *own_query)
{
struct bridge_mcast_other_query *other_query = NULL;
struct bridge_mcast_querier *querier;
struct br_ip br_group;
unsigned long time;
if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
!brmctx->multicast_querier)
return;
memset(&br_group.dst, 0, sizeof(br_group.dst));
if (pmctx ? (own_query == &pmctx->ip4_own_query) :
(own_query == &brmctx->ip4_own_query)) {
querier = &brmctx->ip4_querier;
other_query = &brmctx->ip4_other_query;
br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
querier = &brmctx->ip6_querier;
other_query = &brmctx->ip6_other_query;
br_group.proto = htons(ETH_P_IPV6);
#endif
}
if (!other_query || timer_pending(&other_query->timer))
return;
/* we're about to select ourselves as querier */
if (!pmctx && querier->port_ifidx) {
struct br_ip zeroip = {};
br_multicast_update_querier(brmctx, querier, 0, &zeroip);
}
__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
0, NULL);
time = jiffies;
time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
brmctx->multicast_startup_query_interval :
brmctx->multicast_query_interval;
mod_timer(&own_query->timer, time);
}
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
struct bridge_mcast_own_query *query)
{
struct net_bridge *br = pmctx->port->br;
struct net_bridge_mcast *brmctx;
spin_lock(&br->multicast_lock);
if (br_multicast_port_ctx_state_stopped(pmctx))
goto out;
brmctx = br_multicast_port_ctx_get_global(pmctx);
if (query->startup_sent < brmctx->multicast_startup_query_count)
query->startup_sent++;
br_multicast_send_query(brmctx, pmctx, query);
out:
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
ip4_own_query.timer);
br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
ip6_own_query.timer);
br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
struct bridge_mcast_other_query *other_query = NULL;
struct net_bridge *br = pg->key.port->br;
struct net_bridge_mcast_port *pmctx;
struct net_bridge_mcast *brmctx;
bool need_rexmit = false;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
!br_opt_get(br, BROPT_MULTICAST_ENABLED))
goto out;
pmctx = br_multicast_pg_to_port_ctx(pg);
if (!pmctx)
goto out;
brmctx = br_multicast_port_ctx_get_global(pmctx);
if (!brmctx->multicast_querier)
goto out;
if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
other_query = &brmctx->ip6_other_query;
#endif
if (!other_query || timer_pending(&other_query->timer))
goto out;
if (pg->grp_query_rexmit_cnt) {
pg->grp_query_rexmit_cnt--;
__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
&pg->key.addr, false, 1, NULL);
}
__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
&pg->key.addr, true, 0, &need_rexmit);
if (pg->grp_query_rexmit_cnt || need_rexmit)
mod_timer(&pg->rexmit_timer, jiffies +
brmctx->multicast_last_member_interval);
out:
spin_unlock(&br->multicast_lock);
}
static int br_mc_disabled_update(struct net_device *dev, bool value,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
.flags = SWITCHDEV_F_DEFER,
.u.mc_disabled = !value,
};
return switchdev_port_attr_set(dev, &attr, extack);
}
void br_multicast_port_ctx_init(struct net_bridge_port *port,
struct net_bridge_vlan *vlan,
struct net_bridge_mcast_port *pmctx)
{
pmctx->port = port;
pmctx->vlan = vlan;
pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
timer_setup(&pmctx->ip4_mc_router_timer,
br_ip4_multicast_router_expired, 0);
timer_setup(&pmctx->ip4_own_query.timer,
br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
timer_setup(&pmctx->ip6_mc_router_timer,
br_ip6_multicast_router_expired, 0);
timer_setup(&pmctx->ip6_own_query.timer,
br_ip6_multicast_port_query_expired, 0);
#endif
}
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
del_timer_sync(&pmctx->ip4_mc_router_timer);
}
int br_multicast_add_port(struct net_bridge_port *port)
{
int err;
port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);
err = br_mc_disabled_update(port->dev,
br_opt_get(port->br,
BROPT_MULTICAST_ENABLED),
NULL);
if (err && err != -EOPNOTSUPP)
return err;
port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
if (!port->mcast_stats)
return -ENOMEM;
return 0;
}
void br_multicast_del_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
HLIST_HEAD(deleted_head);
struct hlist_node *n;
/* Take care of the remaining groups, only perm ones should be left */
spin_lock_bh(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
br_multicast_find_del_pg(br, pg);
hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
br_multicast_gc(&deleted_head);
br_multicast_port_ctx_deinit(&port->multicast_ctx);
free_percpu(port->mcast_stats);
}
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
if (try_to_del_timer_sync(&query->timer) >= 0 ||
del_timer(&query->timer))
mod_timer(&query->timer, jiffies);
}
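/* Re-enable own queries on a port context, re-add permanent router ports and,
 * for VLAN contexts, recompute the per-VLAN group counter which may have
 * become stale (see the comment below).
 */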
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
struct net_bridge *br = pmctx->port->br;
struct net_bridge_mcast *brmctx;
brmctx = br_multicast_port_ctx_get_global(pmctx);
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
!netif_running(br->dev))
return;
br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&pmctx->ip6_own_query);
#endif
if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
br_ip4_multicast_add_router(brmctx, pmctx);
br_ip6_multicast_add_router(brmctx, pmctx);
}
if (br_multicast_port_ctx_is_vlan(pmctx)) {
struct net_bridge_port_group *pg;
u32 n = 0;
/* The mcast_n_groups counter might be wrong. First,
* BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
* are flushed, thus mcast_n_groups after the toggle does not
* reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled are not reflected
* either. Thus we have to refresh the counter.
*/
hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
if (pg->key.addr.vid == pmctx->vlan->vid)
n++;
}
WRITE_ONCE(pmctx->mdb_n_entries, n);
}
}
void br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
spin_lock_bh(&br->multicast_lock);
__br_multicast_enable_port_ctx(&port->multicast_ctx);
spin_unlock_bh(&br->multicast_lock);
}
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
struct net_bridge_port_group *pg;
struct hlist_node *n;
bool del = false;
hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
(!br_multicast_port_ctx_is_vlan(pmctx) ||
pg->key.addr.vid == pmctx->vlan->vid))
br_multicast_find_del_pg(pmctx->port->br, pg);
del |= br_ip4_multicast_rport_del(pmctx);
del_timer(&pmctx->ip4_mc_router_timer);
del_timer(&pmctx->ip4_own_query.timer);
del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&pmctx->ip6_mc_router_timer);
del_timer(&pmctx->ip6_own_query.timer);
#endif
br_multicast_rport_del_notify(pmctx, del);
}
void br_multicast_disable_port(struct net_bridge_port *port)
{
spin_lock_bh(&port->br->multicast_lock);
__br_multicast_disable_port_ctx(&port->multicast_ctx);
spin_unlock_bh(&port->br->multicast_lock);
}
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
struct net_bridge_group_src *ent;
struct hlist_node *tmp;
int deleted = 0;
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
if (ent->flags & BR_SGRP_F_DELETE) {
br_multicast_del_group_src(ent, false);
deleted++;
}
return deleted;
}
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
unsigned long expires)
{
mod_timer(&src->timer, expires);
br_multicast_fwd_src_handle(src);
}
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg)
{
struct bridge_mcast_other_query *other_query = NULL;
u32 lmqc = brmctx->multicast_last_member_count;
unsigned long lmqt, lmi, now = jiffies;
struct net_bridge_group_src *ent;
if (!netif_running(brmctx->br->dev) ||
!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
return;
if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
other_query = &brmctx->ip6_other_query;
#endif
lmqt = now + br_multicast_lmqt(brmctx);
hlist_for_each_entry(ent, &pg->src_list, node) {
if (ent->flags & BR_SGRP_F_SEND) {
ent->flags &= ~BR_SGRP_F_SEND;
if (ent->timer.expires > lmqt) {
if (brmctx->multicast_querier &&
other_query &&
!timer_pending(&other_query->timer))
ent->src_query_rexmit_cnt = lmqc;
__grp_src_mod_timer(ent, lmqt);
}
}
}
if (!brmctx->multicast_querier ||
!other_query || timer_pending(&other_query->timer))
return;
__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
&pg->key.addr, true, 1, NULL);
lmi = now + brmctx->multicast_last_member_interval;
if (!timer_pending(&pg->rexmit_timer) ||
time_after(pg->rexmit_timer.expires, lmi))
mod_timer(&pg->rexmit_timer, lmi);
}
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg)
{
struct bridge_mcast_other_query *other_query = NULL;
unsigned long now = jiffies, lmi;
if (!netif_running(brmctx->br->dev) ||
!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
return;
if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
other_query = &brmctx->ip6_other_query;
#endif
if (brmctx->multicast_querier &&
other_query && !timer_pending(&other_query->timer)) {
lmi = now + brmctx->multicast_last_member_interval;
pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
&pg->key.addr, false, 0, NULL);
if (!timer_pending(&pg->rexmit_timer) ||
time_after(pg->rexmit_timer.expires, lmi))
mod_timer(&pg->rexmit_timer, lmi);
}
if (pg->filter_mode == MCAST_EXCLUDE &&
(!timer_pending(&pg->timer) ||
time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
/* State Msg type New state Actions
* INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
* INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
* EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
*/
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
struct br_ip src_ip;
u32 src_idx;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (!ent) {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent)
changed = true;
}
if (ent)
__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
return changed;
}
/* State Msg type New state Actions
* INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
* Delete (A-B)
* Group Timer=GMI
*/
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
struct net_bridge_group_src *ent;
struct br_ip src_ip;
u32 src_idx;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags |= BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent)
ent->flags &= ~BR_SGRP_F_DELETE;
else
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent)
br_multicast_fwd_src_handle(ent);
}
br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type);
__grp_src_delete_marked(pg);
}
/* State Msg type New state Actions
* EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
* Delete (X-A)
* Delete (Y-A)
* Group Timer=GMI
*/
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
struct br_ip src_ip;
u32 src_idx;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags |= BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_DELETE;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
__grp_src_mod_timer(ent,
now + br_multicast_gmi(brmctx));
changed = true;
}
}
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (__grp_src_delete_marked(pg))
changed = true;
return changed;
}
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type);
br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
changed = true;
break;
case MCAST_EXCLUDE:
changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
addr_size, grec_type);
break;
}
pg->filter_mode = MCAST_EXCLUDE;
mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
return changed;
}
/* State Msg type New state Actions
* INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
* Send Q(G,A-B)
*/
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
u32 src_idx, to_send = pg->src_ents;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags |= BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_SEND;
to_send--;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent)
changed = true;
}
if (ent)
__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
return changed;
}
/* State Msg type New state Actions
* EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
* Send Q(G,X-A)
* Send Q(G)
*/
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
u32 src_idx, to_send = pg->src_ents;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
if (timer_pending(&ent->timer))
ent->flags |= BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
if (timer_pending(&ent->timer)) {
ent->flags &= ~BR_SGRP_F_SEND;
to_send--;
}
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent)
changed = true;
}
if (ent)
__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
__grp_send_query_and_rexmit(brmctx, pmctx, pg);
return changed;
}
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
nsrcs, addr_size, grec_type);
break;
case MCAST_EXCLUDE:
changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
nsrcs, addr_size, grec_type);
break;
}
if (br_multicast_eht_should_del_pg(pg)) {
pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we must not
		 * access pg after the delete, so we have to return false
*/
changed = false;
}
return changed;
}
/* State Msg type New state Actions
* INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
* Delete (A-B)
* Send Q(G,A*B)
* Group Timer=GMI
*/
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
struct net_bridge_group_src *ent;
u32 src_idx, to_send = 0;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
BR_SGRP_F_SEND;
to_send++;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
}
if (ent)
br_multicast_fwd_src_handle(ent);
}
br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type);
__grp_src_delete_marked(pg);
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
/* State Msg type New state Actions
* EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
* Delete (X-A)
* Delete (Y-A)
* Send Q(G,A-Y)
* Group Timer=GMI
*/
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
struct net_bridge_group_src *ent;
u32 src_idx, to_send = 0;
bool changed = false;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_DELETE;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
__grp_src_mod_timer(ent, pg->timer.expires);
changed = true;
}
}
if (ent && timer_pending(&ent->timer)) {
ent->flags |= BR_SGRP_F_SEND;
to_send++;
}
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (__grp_src_delete_marked(pg))
changed = true;
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
return changed;
}
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size,
int grec_type)
{
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
addr_size, grec_type);
br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
changed = true;
break;
case MCAST_EXCLUDE:
changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
nsrcs, addr_size, grec_type);
break;
}
pg->filter_mode = MCAST_EXCLUDE;
mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
return changed;
}
/* State Msg type New state Actions
* INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
*/
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
struct net_bridge_group_src *ent;
u32 src_idx, to_send = 0;
bool changed = false;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags &= ~BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags |= BR_SGRP_F_SEND;
to_send++;
}
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
return changed;
}
/* State Msg type New state Actions
* EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
* Send Q(G,A-Y)
*/
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
struct net_bridge_group_src *ent;
u32 src_idx, to_send = 0;
bool changed = false;
struct br_ip src_ip;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags &= ~BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (!ent) {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
__grp_src_mod_timer(ent, pg->timer.expires);
changed = true;
}
}
if (ent && timer_pending(&ent->timer)) {
ent->flags |= BR_SGRP_F_SEND;
to_send++;
}
}
if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
grec_type))
changed = true;
if (to_send)
__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
return changed;
}
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct net_bridge_port_group *pg, void *h_addr,
void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
nsrcs, addr_size, grec_type);
break;
case MCAST_EXCLUDE:
changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
nsrcs, addr_size, grec_type);
break;
}
if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
br_multicast_eht_should_del_pg(pg)) {
if (br_multicast_eht_should_del_pg(pg))
pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
br_multicast_find_del_pg(pg->key.port->br, pg);
/* a notification has already been sent and we shouldn't
* access pg after the delete so we have to return false
*/
changed = false;
}
return changed;
}
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
struct net_bridge_port *p,
const unsigned char *src)
{
struct net_bridge *br __maybe_unused = mp->br;
struct net_bridge_port_group *pg;
for (pg = mlock_dereference(mp->ports, br);
pg;
pg = mlock_dereference(pg->next, br))
if (br_port_group_equal(pg, p, src))
return pg;
return NULL;
}
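/* Walk all group records of an IGMPv3 report: validate each record, add or
 * leave the group as needed and, unless there is no port context or IGMPv2
 * behaviour is forced, apply the per-source state transitions under
 * multicast_lock.
 */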
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
bool igmpv2 = brmctx->multicast_igmp_version == 2;
struct net_bridge_mdb_entry *mdst;
struct net_bridge_port_group *pg;
const unsigned char *src;
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
int i, len, num, type;
__be32 group, *h_addr;
bool changed = false;
int err = 0;
u16 nsrcs;
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
len = skb_transport_offset(skb) + sizeof(*ih);
for (i = 0; i < num; i++) {
len += sizeof(*grec);
if (!ip_mc_may_pull(skb, len))
return -EINVAL;
grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca;
type = grec->grec_type;
nsrcs = ntohs(grec->grec_nsrcs);
len += nsrcs * 4;
if (!ip_mc_may_pull(skb, len))
return -EINVAL;
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
case IGMPV3_CHANGE_TO_INCLUDE:
case IGMPV3_CHANGE_TO_EXCLUDE:
case IGMPV3_ALLOW_NEW_SOURCES:
case IGMPV3_BLOCK_OLD_SOURCES:
break;
default:
continue;
}
src = eth_hdr(skb)->h_source;
if (nsrcs == 0 &&
(type == IGMPV3_CHANGE_TO_INCLUDE ||
type == IGMPV3_MODE_IS_INCLUDE)) {
if (!pmctx || igmpv2) {
br_ip4_multicast_leave_group(brmctx, pmctx,
group, vid, src);
continue;
}
} else {
err = br_ip4_multicast_add_group(brmctx, pmctx, group,
vid, src, igmpv2);
if (err)
break;
}
if (!pmctx || igmpv2)
continue;
spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto unlock_continue;
mdst = br_mdb_ip4_get(brmctx->br, group, vid);
if (!mdst)
goto unlock_continue;
pg = br_multicast_find_port(mdst, pmctx->port, src);
if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
goto unlock_continue;
/* reload grec and host addr */
grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
h_addr = &ip_hdr(skb)->saddr;
switch (type) {
case IGMPV3_ALLOW_NEW_SOURCES:
changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
case IGMPV3_MODE_IS_INCLUDE:
changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
case IGMPV3_MODE_IS_EXCLUDE:
changed = br_multicast_isexc(brmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
case IGMPV3_CHANGE_TO_INCLUDE:
changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
case IGMPV3_CHANGE_TO_EXCLUDE:
changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
case IGMPV3_BLOCK_OLD_SOURCES:
changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
grec->grec_src,
nsrcs, sizeof(__be32), type);
break;
}
if (changed)
br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
spin_unlock(&brmctx->br->multicast_lock);
}
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
bool mldv1 = brmctx->multicast_mld_version == 1;
struct net_bridge_mdb_entry *mdst;
struct net_bridge_port_group *pg;
unsigned int nsrcs_offset;
struct mld2_report *mld2r;
const unsigned char *src;
struct in6_addr *h_addr;
struct mld2_grec *grec;
unsigned int grec_len;
bool changed = false;
int i, len, num;
int err = 0;
if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
return -EINVAL;
mld2r = (struct mld2_report *)icmp6_hdr(skb);
num = ntohs(mld2r->mld2r_ngrec);
len = skb_transport_offset(skb) + sizeof(*mld2r);
for (i = 0; i < num; i++) {
__be16 *_nsrcs, __nsrcs;
u16 nsrcs;
nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
nsrcs_offset + sizeof(__nsrcs))
return -EINVAL;
_nsrcs = skb_header_pointer(skb, nsrcs_offset,
sizeof(__nsrcs), &__nsrcs);
if (!_nsrcs)
return -EINVAL;
nsrcs = ntohs(*_nsrcs);
grec_len = struct_size(grec, grec_src, nsrcs);
if (!ipv6_mc_may_pull(skb, len + grec_len))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
len += grec_len;
switch (grec->grec_type) {
case MLD2_MODE_IS_INCLUDE:
case MLD2_MODE_IS_EXCLUDE:
case MLD2_CHANGE_TO_INCLUDE:
case MLD2_CHANGE_TO_EXCLUDE:
case MLD2_ALLOW_NEW_SOURCES:
case MLD2_BLOCK_OLD_SOURCES:
break;
default:
continue;
}
src = eth_hdr(skb)->h_source;
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
nsrcs == 0) {
if (!pmctx || mldv1) {
br_ip6_multicast_leave_group(brmctx, pmctx,
&grec->grec_mca,
vid, src);
continue;
}
} else {
err = br_ip6_multicast_add_group(brmctx, pmctx,
&grec->grec_mca, vid,
src, mldv1);
if (err)
break;
}
if (!pmctx || mldv1)
continue;
spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto unlock_continue;
mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
if (!mdst)
goto unlock_continue;
pg = br_multicast_find_port(mdst, pmctx->port, src);
if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
goto unlock_continue;
h_addr = &ipv6_hdr(skb)->saddr;
switch (grec->grec_type) {
case MLD2_ALLOW_NEW_SOURCES:
changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
case MLD2_MODE_IS_INCLUDE:
changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
case MLD2_MODE_IS_EXCLUDE:
changed = br_multicast_isexc(brmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
case MLD2_CHANGE_TO_INCLUDE:
changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
case MLD2_CHANGE_TO_EXCLUDE:
changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
case MLD2_BLOCK_OLD_SOURCES:
changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
grec->grec_src, nsrcs,
sizeof(struct in6_addr),
grec->grec_type);
break;
}
if (changed)
br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
spin_unlock(&brmctx->br->multicast_lock);
}
return err;
}
#endif
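/* Querier election: the numerically lowest source address wins, so @saddr is
 * only adopted as the selected querier if it is lower than or equal to the
 * currently known querier address, or if no election is in progress (neither
 * the own nor the other querier timer is running).
 */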
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct br_ip *saddr)
{
int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
struct timer_list *own_timer, *other_timer;
struct bridge_mcast_querier *querier;
switch (saddr->proto) {
case htons(ETH_P_IP):
querier = &brmctx->ip4_querier;
own_timer = &brmctx->ip4_own_query.timer;
other_timer = &brmctx->ip4_other_query.timer;
if (!querier->addr.src.ip4 ||
ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
goto update;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
querier = &brmctx->ip6_querier;
own_timer = &brmctx->ip6_own_query.timer;
other_timer = &brmctx->ip6_other_query.timer;
if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
goto update;
break;
#endif
default:
return false;
}
if (!timer_pending(own_timer) && !timer_pending(other_timer))
goto update;
return false;
update:
br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);
return true;
}
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
const struct bridge_mcast_querier *querier)
{
int port_ifidx = READ_ONCE(querier->port_ifidx);
struct net_bridge_port *p;
struct net_device *dev;
if (port_ifidx == 0)
return NULL;
dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
if (!dev)
return NULL;
p = br_port_get_rtnl_rcu(dev);
if (!p || p->br != br)
return NULL;
return p;
}
size_t br_multicast_querier_state_size(void)
{
return nla_total_size(0) + /* nest attribute */
nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IP_PORT */
nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IPV6_PORT */
nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
0;
}
/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
const struct net_bridge_mcast *brmctx,
int nest_attr)
{
struct bridge_mcast_querier querier = {};
struct net_bridge_port *p;
struct nlattr *nest;
if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
br_multicast_ctx_vlan_global_disabled(brmctx))
return 0;
nest = nla_nest_start(skb, nest_attr);
if (!nest)
return -EMSGSIZE;
rcu_read_lock();
if (!brmctx->multicast_querier &&
!timer_pending(&brmctx->ip4_other_query.timer))
goto out_v6;
br_multicast_read_querier(&brmctx->ip4_querier, &querier);
if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
querier.addr.src.ip4)) {
rcu_read_unlock();
goto out_err;
}
p = __br_multicast_get_querier_port(brmctx->br, &querier);
if (timer_pending(&brmctx->ip4_other_query.timer) &&
(nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
br_timer_value(&brmctx->ip4_other_query.timer),
BRIDGE_QUERIER_PAD) ||
(p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
rcu_read_unlock();
goto out_err;
}
out_v6:
#if IS_ENABLED(CONFIG_IPV6)
if (!brmctx->multicast_querier &&
!timer_pending(&brmctx->ip6_other_query.timer))
goto out;
br_multicast_read_querier(&brmctx->ip6_querier, &querier);
if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
&querier.addr.src.ip6)) {
rcu_read_unlock();
goto out_err;
}
p = __br_multicast_get_querier_port(brmctx->br, &querier);
if (timer_pending(&brmctx->ip6_other_query.timer) &&
(nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
br_timer_value(&brmctx->ip6_other_query.timer),
BRIDGE_QUERIER_PAD) ||
(p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
p->dev->ifindex)))) {
rcu_read_unlock();
goto out_err;
}
out:
#endif
rcu_read_unlock();
nla_nest_end(skb, nest);
if (!nla_len(nest))
nla_nest_cancel(skb, nest);
return 0;
out_err:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
struct bridge_mcast_other_query *query,
unsigned long max_delay)
{
if (!timer_pending(&query->timer))
query->delay_time = jiffies + max_delay;
mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
static void br_port_mc_router_state_change(struct net_bridge_port *p,
bool is_mc_router)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
.flags = SWITCHDEV_F_DEFER,
.u.mrouter = is_mc_router,
};
switchdev_port_attr_set(p->dev, &attr, NULL);
}
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
struct hlist_head *mc_router_list,
struct hlist_node *rlist)
{
struct net_bridge_mcast_port *pmctx;
#if IS_ENABLED(CONFIG_IPV6)
if (mc_router_list == &brmctx->ip6_mc_router_list)
pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
ip6_rlist);
else
#endif
pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
ip4_rlist);
return pmctx->port;
}
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
struct net_bridge_port *port,
struct hlist_head *mc_router_list)
{
struct hlist_node *slot = NULL;
struct net_bridge_port *p;
struct hlist_node *rlist;
hlist_for_each(rlist, mc_router_list) {
p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
if ((unsigned long)port >= (unsigned long)p)
break;
slot = rlist;
}
return slot;
}
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
if (rnode != &pmctx->ip6_rlist)
return hlist_unhashed(&pmctx->ip6_rlist);
else
return hlist_unhashed(&pmctx->ip4_rlist);
#else
return true;
#endif
}
/* Add port to router_list
* list is maintained ordered by pointer value
* and locked by br->multicast_lock and RCU
*/
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct hlist_node *rlist,
struct hlist_head *mc_router_list)
{
struct hlist_node *slot;
if (!hlist_unhashed(rlist))
return;
slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
if (slot)
hlist_add_behind_rcu(rlist, slot);
else
hlist_add_head_rcu(rlist, mc_router_list);
/* For backwards compatibility for now, only notify if we
* switched from no IPv4/IPv6 multicast router to a new
* IPv4 or IPv6 multicast router.
*/
if (br_multicast_no_router_otherpf(pmctx, rlist)) {
br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
br_port_mc_router_state_change(pmctx->port, true);
}
}
/* Add port to router_list
* list is maintained ordered by pointer value
* and locked by br->multicast_lock and RCU
*/
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx)
{
br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
&brmctx->ip4_mc_router_list);
}
/* Add port to router_list
* list is maintained ordered by pointer value
* and locked by br->multicast_lock and RCU
*/
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
&brmctx->ip6_mc_router_list);
#endif
}
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct timer_list *timer,
struct hlist_node *rlist,
struct hlist_head *mc_router_list)
{
unsigned long now = jiffies;
if (!br_multicast_ctx_should_use(brmctx, pmctx))
return;
if (!pmctx) {
if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
if (!br_ip4_multicast_is_router(brmctx) &&
!br_ip6_multicast_is_router(brmctx))
br_mc_router_state_change(brmctx->br, true);
mod_timer(timer, now + brmctx->multicast_querier_interval);
}
return;
}
if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
pmctx->multicast_router == MDB_RTR_TYPE_PERM)
return;
br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
mod_timer(timer, now + brmctx->multicast_querier_interval);
}
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx)
{
struct timer_list *timer = &brmctx->ip4_mc_router_timer;
struct hlist_node *rlist = NULL;
if (pmctx) {
timer = &pmctx->ip4_mc_router_timer;
rlist = &pmctx->ip4_rlist;
}
br_multicast_mark_router(brmctx, pmctx, timer, rlist,
&brmctx->ip4_mc_router_list);
}
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
struct timer_list *timer = &brmctx->ip6_mc_router_timer;
struct hlist_node *rlist = NULL;
if (pmctx) {
timer = &pmctx->ip6_mc_router_timer;
rlist = &pmctx->ip6_rlist;
}
br_multicast_mark_router(brmctx, pmctx, timer, rlist,
&brmctx->ip6_mc_router_list);
#endif
}
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct bridge_mcast_other_query *query,
struct br_ip *saddr,
unsigned long max_delay)
{
if (!br_multicast_select_querier(brmctx, pmctx, saddr))
return;
br_multicast_update_query_timer(brmctx, query, max_delay);
br_ip4_multicast_mark_router(brmctx, pmctx);
}
#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct bridge_mcast_other_query *query,
struct br_ip *saddr,
unsigned long max_delay)
{
if (!br_multicast_select_querier(brmctx, pmctx, saddr))
return;
br_multicast_update_query_timer(brmctx, query, max_delay);
br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
unsigned int transport_len = ip_transport_len(skb);
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
struct net_bridge_mdb_entry *mp;
struct igmpv3_query *ih3;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr = {};
unsigned long max_delay;
unsigned long now = jiffies;
__be32 group;
spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto out;
group = ih->group;
if (transport_len == sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
} else if (transport_len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs ||
(brmctx->multicast_igmp_version == 3 && group &&
ih3->suppress))
goto out;
max_delay = ih3->code ?
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
} else {
goto out;
}
if (!group) {
saddr.proto = htons(ETH_P_IP);
saddr.src.ip4 = iph->saddr;
br_ip4_multicast_query_received(brmctx, pmctx,
&brmctx->ip4_other_query,
&saddr, max_delay);
goto out;
}
mp = br_mdb_ip4_get(brmctx->br, group, vid);
if (!mp)
goto out;
max_delay *= brmctx->multicast_last_member_count;
if (mp->host_joined &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
mod_timer(&mp->timer, now + max_delay);
for (pp = &mp->ports;
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0 &&
(brmctx->multicast_igmp_version == 2 ||
p->filter_mode == MCAST_EXCLUDE))
mod_timer(&p->timer, now + max_delay);
}
out:
spin_unlock(&brmctx->br->multicast_lock);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
unsigned int transport_len = ipv6_transport_len(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr = {};
unsigned long max_delay;
unsigned long now = jiffies;
unsigned int offset = skb_transport_offset(skb);
const struct in6_addr *group = NULL;
bool is_general_query;
int err = 0;
spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto out;
if (transport_len == sizeof(*mld)) {
if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
err = -EINVAL;
goto out;
}
mld = (struct mld_msg *) icmp6_hdr(skb);
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
} else {
if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
err = -EINVAL;
goto out;
}
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
if (brmctx->multicast_mld_version == 2 &&
!ipv6_addr_any(&mld2q->mld2q_mca) &&
mld2q->mld2q_suppress)
goto out;
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
is_general_query = group && ipv6_addr_any(group);
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
saddr.src.ip6 = ipv6_hdr(skb)->saddr;
br_ip6_multicast_query_received(brmctx, pmctx,
&brmctx->ip6_other_query,
&saddr, max_delay);
goto out;
} else if (!group) {
goto out;
}
mp = br_mdb_ip6_get(brmctx->br, group, vid);
if (!mp)
goto out;
max_delay *= brmctx->multicast_last_member_count;
if (mp->host_joined &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
mod_timer(&mp->timer, now + max_delay);
for (pp = &mp->ports;
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0 &&
(brmctx->multicast_mld_version == 1 ||
p->filter_mode == MCAST_EXCLUDE))
mod_timer(&p->timer, now + max_delay);
}
out:
spin_unlock(&brmctx->br->multicast_lock);
return err;
}
#endif
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct br_ip *group,
struct bridge_mcast_other_query *other_query,
struct bridge_mcast_own_query *own_query,
const unsigned char *src)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
unsigned long now;
unsigned long time;
spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto out;
mp = br_mdb_ip_get(brmctx->br, group);
if (!mp)
goto out;
if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
struct net_bridge_port_group __rcu **pp;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
pp = &p->next) {
if (!br_port_group_equal(p, pmctx->port, src))
continue;
if (p->flags & MDB_PG_FLAGS_PERMANENT)
break;
p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
br_multicast_del_pg(mp, p, pp);
}
goto out;
}
if (timer_pending(&other_query->timer))
goto out;
if (brmctx->multicast_querier) {
__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
false, 0, NULL);
time = jiffies + brmctx->multicast_last_member_count *
brmctx->multicast_last_member_interval;
mod_timer(&own_query->timer, time);
for (p = mlock_dereference(mp->ports, brmctx->br);
p != NULL && pmctx != NULL;
p = mlock_dereference(p->next, brmctx->br)) {
if (!br_port_group_equal(p, pmctx->port, src))
continue;
if (!hlist_unhashed(&p->mglist) &&
(timer_pending(&p->timer) ?
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
}
break;
}
}
now = jiffies;
time = now + brmctx->multicast_last_member_count *
brmctx->multicast_last_member_interval;
if (!pmctx) {
if (mp->host_joined &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
mod_timer(&mp->timer, time);
}
goto out;
}
for (p = mlock_dereference(mp->ports, brmctx->br);
p != NULL;
p = mlock_dereference(p->next, brmctx->br)) {
if (p->key.port != pmctx->port)
continue;
if (!hlist_unhashed(&p->mglist) &&
(timer_pending(&p->timer) ?
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
}
break;
}
out:
spin_unlock(&brmctx->br->multicast_lock);
}
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
__be32 group,
__u16 vid,
const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
if (ipv4_is_local_multicast(group))
return;
own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
memset(&br_group, 0, sizeof(br_group));
br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
br_multicast_leave_group(brmctx, pmctx, &br_group,
&brmctx->ip4_other_query,
own_query, src);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
const struct in6_addr *group,
__u16 vid,
const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
if (ipv6_addr_is_ll_all_nodes(group))
return;
own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;
memset(&br_group, 0, sizeof(br_group));
br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
br_multicast_leave_group(brmctx, pmctx, &br_group,
&brmctx->ip6_other_query,
own_query, src);
}
#endif
static void br_multicast_err_count(const struct net_bridge *br,
const struct net_bridge_port *p,
__be16 proto)
{
struct bridge_mcast_stats __percpu *stats;
struct bridge_mcast_stats *pstats;
if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
return;
if (p)
stats = p->mcast_stats;
else
stats = br->mcast_stats;
if (WARN_ON(!stats))
return;
pstats = this_cpu_ptr(stats);
u64_stats_update_begin(&pstats->syncp);
switch (proto) {
case htons(ETH_P_IP):
pstats->mstats.igmp_parse_errors++;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
pstats->mstats.mld_parse_errors++;
break;
#endif
}
u64_stats_update_end(&pstats->syncp);
}
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
const struct sk_buff *skb)
{
unsigned int offset = skb_transport_offset(skb);
struct pimhdr *pimhdr, _pimhdr;
pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
return;
spin_lock(&brmctx->br->multicast_lock);
br_ip4_multicast_mark_router(brmctx, pmctx);
spin_unlock(&brmctx->br->multicast_lock);
}
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb)
{
if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
return -ENOMSG;
spin_lock(&brmctx->br->multicast_lock);
br_ip4_multicast_mark_router(brmctx, pmctx);
spin_unlock(&brmctx->br->multicast_lock);
return 0;
}
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
const unsigned char *src;
struct igmphdr *ih;
int err;
err = ip_mc_check_igmp(skb);
if (err == -ENOMSG) {
if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
if (ip_hdr(skb)->protocol == IPPROTO_PIM)
br_multicast_pim(brmctx, pmctx, skb);
} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
}
return 0;
} else if (err < 0) {
br_multicast_err_count(brmctx->br, p, skb->protocol);
return err;
}
ih = igmp_hdr(skb);
src = eth_hdr(skb)->h_source;
BR_INPUT_SKB_CB(skb)->igmp = ih->type;
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
src, true);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
br_ip4_multicast_query(brmctx, pmctx, skb, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
break;
}
br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb)
{
if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
return;
spin_lock(&brmctx->br->multicast_lock);
br_ip6_multicast_mark_router(brmctx, pmctx);
spin_unlock(&brmctx->br->multicast_lock);
}
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
struct sk_buff *skb,
u16 vid)
{
struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
const unsigned char *src;
struct mld_msg *mld;
int err;
err = ipv6_mc_check_mld(skb);
if (err == -ENOMSG || err == -ENODATA) {
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
if (err == -ENODATA &&
ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);
return 0;
} else if (err < 0) {
br_multicast_err_count(brmctx->br, p, skb->protocol);
return err;
}
mld = (struct mld_msg *)skb_transport_header(skb);
BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
switch (mld->mld_type) {
case ICMPV6_MGM_REPORT:
src = eth_hdr(skb)->h_source;
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
vid, src, true);
break;
case ICMPV6_MLD2_REPORT:
err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
break;
case ICMPV6_MGM_QUERY:
err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
break;
case ICMPV6_MGM_REDUCTION:
src = eth_hdr(skb)->h_source;
br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
src);
break;
}
br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
return err;
}
#endif
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
struct net_bridge_mcast_port **pmctx,
struct net_bridge_vlan *vlan,
struct sk_buff *skb, u16 vid)
{
int ret = 0;
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
return 0;
if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
const struct net_bridge_vlan *masterv;
/* the vlan has the master flag set only when transmitting
* through the bridge device
*/
if (br_vlan_is_master(vlan)) {
masterv = vlan;
*brmctx = &vlan->br_mcast_ctx;
*pmctx = NULL;
} else {
masterv = vlan->brvlan;
*brmctx = &vlan->brvlan->br_mcast_ctx;
*pmctx = &vlan->port_mcast_ctx;
}
if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
return 0;
}
switch (skb->protocol) {
case htons(ETH_P_IP):
ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
break;
#endif
}
return ret;
}
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
struct bridge_mcast_own_query *query,
struct bridge_mcast_querier *querier)
{
spin_lock(&brmctx->br->multicast_lock);
if (br_multicast_ctx_vlan_disabled(brmctx))
goto out;
if (query->startup_sent < brmctx->multicast_startup_query_count)
query->startup_sent++;
br_multicast_send_query(brmctx, NULL, query);
out:
spin_unlock(&brmctx->br->multicast_lock);
}
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip4_own_query.timer);
br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
&brmctx->ip4_querier);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
ip6_own_query.timer);
br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
&brmctx->ip6_querier);
}
#endif
static void br_multicast_gc_work(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
mcast_gc_work);
HLIST_HEAD(deleted_head);
spin_lock_bh(&br->multicast_lock);
hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
br_multicast_gc(&deleted_head);
}
void br_multicast_ctx_init(struct net_bridge *br,
struct net_bridge_vlan *vlan,
struct net_bridge_mcast *brmctx)
{
brmctx->br = br;
brmctx->vlan = vlan;
brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
brmctx->multicast_last_member_count = 2;
brmctx->multicast_startup_query_count = 2;
brmctx->multicast_last_member_interval = HZ;
brmctx->multicast_query_response_interval = 10 * HZ;
brmctx->multicast_startup_query_interval = 125 * HZ / 4;
brmctx->multicast_query_interval = 125 * HZ;
brmctx->multicast_querier_interval = 255 * HZ;
brmctx->multicast_membership_interval = 260 * HZ;
brmctx->ip4_other_query.delay_time = 0;
brmctx->ip4_querier.port_ifidx = 0;
seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
brmctx->multicast_mld_version = 1;
brmctx->ip6_other_query.delay_time = 0;
brmctx->ip6_querier.port_ifidx = 0;
seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif
timer_setup(&brmctx->ip4_mc_router_timer,
br_ip4_multicast_local_router_expired, 0);
timer_setup(&brmctx->ip4_other_query.timer,
br_ip4_multicast_querier_expired, 0);
timer_setup(&brmctx->ip4_own_query.timer,
br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
timer_setup(&brmctx->ip6_mc_router_timer,
br_ip6_multicast_local_router_expired, 0);
timer_setup(&brmctx->ip6_other_query.timer,
br_ip6_multicast_querier_expired, 0);
timer_setup(&brmctx->ip6_own_query.timer,
br_ip6_multicast_query_expired, 0);
#endif
}
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
__br_multicast_stop(brmctx);
}
void br_multicast_init(struct net_bridge *br)
{
br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
br_multicast_ctx_init(br, NULL, &br->multicast_ctx);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
spin_lock_init(&br->multicast_lock);
INIT_HLIST_HEAD(&br->mdb_list);
INIT_HLIST_HEAD(&br->mcast_gc_list);
INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
struct in_device *in_dev = in_dev_get(br->dev);
if (!in_dev)
return;
__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
struct in6_addr addr;
ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
void br_multicast_join_snoopers(struct net_bridge *br)
{
br_ip4_multicast_join_snoopers(br);
br_ip6_multicast_join_snoopers(br);
}
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
struct in_device *in_dev = in_dev_get(br->dev);
if (WARN_ON(!in_dev))
return;
__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
struct in6_addr addr;
ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
void br_multicast_leave_snoopers(struct net_bridge *br)
{
br_ip4_multicast_leave_snoopers(br);
br_ip6_multicast_leave_snoopers(br);
}
static void __br_multicast_open_query(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return;
mod_timer(&query->timer, jiffies);
}
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
void br_multicast_open(struct net_bridge *br)
{
ASSERT_RTNL();
if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
vg = br_vlan_group(br);
if (vg) {
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
struct net_bridge_mcast *brmctx;
brmctx = &vlan->br_mcast_ctx;
if (br_vlan_is_brentry(vlan) &&
!br_multicast_ctx_vlan_disabled(brmctx))
__br_multicast_open(&vlan->br_mcast_ctx);
}
}
} else {
__br_multicast_open(&br->multicast_ctx);
}
}
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
del_timer_sync(&brmctx->ip4_mc_router_timer);
del_timer_sync(&brmctx->ip4_other_query.timer);
del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&brmctx->ip6_mc_router_timer);
del_timer_sync(&brmctx->ip6_other_query.timer);
del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
struct net_bridge *br;
/* it's okay to check for the flag without the multicast lock because it
* can only change under RTNL -> multicast_lock, we need the latter to
* sync with timers and packets
*/
if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
return;
if (br_vlan_is_master(vlan)) {
br = vlan->br;
if (!br_vlan_is_brentry(vlan) ||
(on &&
br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
return;
spin_lock_bh(&br->multicast_lock);
vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
spin_unlock_bh(&br->multicast_lock);
if (on)
__br_multicast_open(&vlan->br_mcast_ctx);
else
__br_multicast_stop(&vlan->br_mcast_ctx);
} else {
struct net_bridge_mcast *brmctx;
brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
return;
br = vlan->port->br;
spin_lock_bh(&br->multicast_lock);
vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
if (on)
__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
else
__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
spin_unlock_bh(&br->multicast_lock);
}
}
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
struct net_bridge_port *p;
if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
return;
list_for_each_entry(p, &vlan->br->port_list, list) {
struct net_bridge_vlan *vport;
vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
if (!vport)
continue;
br_multicast_toggle_one_vlan(vport, on);
}
if (br_vlan_is_brentry(vlan))
br_multicast_toggle_one_vlan(vlan, on);
}
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
struct net_bridge_port *p;
if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
return 0;
if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
return -EINVAL;
}
vg = br_vlan_group(br);
if (!vg)
return 0;
br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);
/* disable/enable non-vlan mcast contexts based on vlan snooping */
if (on)
__br_multicast_stop(&br->multicast_ctx);
else
__br_multicast_open(&br->multicast_ctx);
list_for_each_entry(p, &br->port_list, list) {
if (on)
br_multicast_disable_port(p);
else
br_multicast_enable_port(p);
}
list_for_each_entry(vlan, &vg->vlan_list, vlist)
br_multicast_toggle_vlan(vlan, on);
return 0;
}
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
ASSERT_RTNL();
/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
* requires only RTNL to change
*/
if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
return false;
vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
br_multicast_toggle_vlan(vlan, on);
return true;
}
void br_multicast_stop(struct net_bridge *br)
{
ASSERT_RTNL();
if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
vg = br_vlan_group(br);
if (vg) {
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
struct net_bridge_mcast *brmctx;
brmctx = &vlan->br_mcast_ctx;
if (br_vlan_is_brentry(vlan) &&
!br_multicast_ctx_vlan_disabled(brmctx))
__br_multicast_stop(&vlan->br_mcast_ctx);
}
}
} else {
__br_multicast_stop(&br->multicast_ctx);
}
}
void br_multicast_dev_del(struct net_bridge *br)
{
struct net_bridge_mdb_entry *mp;
HLIST_HEAD(deleted_head);
struct hlist_node *tmp;
spin_lock_bh(&br->multicast_lock);
hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
br_multicast_del_mdb_entry(mp);
hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
br_multicast_ctx_deinit(&br->multicast_ctx);
br_multicast_gc(&deleted_head);
cancel_work_sync(&br->mcast_gc_work);
rcu_barrier();
}
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
int err = -EINVAL;
spin_lock_bh(&brmctx->br->multicast_lock);
switch (val) {
case MDB_RTR_TYPE_DISABLED:
case MDB_RTR_TYPE_PERM:
br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&brmctx->ip6_mc_router_timer);
#endif
brmctx->multicast_router = val;
err = 0;
break;
case MDB_RTR_TYPE_TEMP_QUERY:
if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
br_mc_router_state_change(brmctx->br, false);
brmctx->multicast_router = val;
err = 0;
break;
}
spin_unlock_bh(&brmctx->br->multicast_lock);
return err;
}
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
if (!deleted)
return;
/* For backwards compatibility for now, only notify if there is
* no multicast router anymore for both IPv4 and IPv6.
*/
if (!hlist_unhashed(&pmctx->ip4_rlist))
return;
#if IS_ENABLED(CONFIG_IPV6)
if (!hlist_unhashed(&pmctx->ip6_rlist))
return;
#endif
br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
br_port_mc_router_state_change(pmctx->port, false);
/* don't allow timer refresh */
if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
unsigned long val)
{
struct net_bridge_mcast *brmctx;
unsigned long now = jiffies;
int err = -EINVAL;
bool del = false;
brmctx = br_multicast_port_ctx_get_global(pmctx);
spin_lock_bh(&brmctx->br->multicast_lock);
if (pmctx->multicast_router == val) {
/* Refresh the temp router port timer */
if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
mod_timer(&pmctx->ip4_mc_router_timer,
now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
mod_timer(&pmctx->ip6_mc_router_timer,
now + brmctx->multicast_querier_interval);
#endif
}
err = 0;
goto unlock;
}
switch (val) {
case MDB_RTR_TYPE_DISABLED:
pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
del |= br_ip4_multicast_rport_del(pmctx);
del_timer(&pmctx->ip4_mc_router_timer);
del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&pmctx->ip6_mc_router_timer);
#endif
br_multicast_rport_del_notify(pmctx, del);
break;
case MDB_RTR_TYPE_TEMP_QUERY:
pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
del |= br_ip4_multicast_rport_del(pmctx);
del |= br_ip6_multicast_rport_del(pmctx);
br_multicast_rport_del_notify(pmctx, del);
break;
case MDB_RTR_TYPE_PERM:
pmctx->multicast_router = MDB_RTR_TYPE_PERM;
del_timer(&pmctx->ip4_mc_router_timer);
br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&pmctx->ip6_mc_router_timer);
#endif
br_ip6_multicast_add_router(brmctx, pmctx);
break;
case MDB_RTR_TYPE_TEMP:
pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
br_ip4_multicast_mark_router(brmctx, pmctx);
br_ip6_multicast_mark_router(brmctx, pmctx);
break;
default:
goto unlock;
}
err = 0;
unlock:
spin_unlock_bh(&brmctx->br->multicast_lock);
return err;
}
int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
int err;
if (br_vlan_is_master(v))
err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
else
err = br_multicast_set_port_router(&v->port_mcast_ctx,
mcast_router);
return err;
}
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
struct bridge_mcast_own_query *query)
{
struct net_bridge_port *port;
if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
return;
__br_multicast_open_query(brmctx->br, query);
rcu_read_lock();
list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_own_query *ip6_own_query;
#endif
if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
continue;
if (br_multicast_ctx_is_vlan(brmctx)) {
struct net_bridge_vlan *vlan;
vlan = br_vlan_find(nbp_vlan_group_rcu(port),
brmctx->vlan->vid);
if (!vlan ||
br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
continue;
ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
} else {
ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
}
if (query == &brmctx->ip4_own_query)
br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
else
br_multicast_enable(ip6_own_query);
#endif
}
rcu_read_unlock();
}
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
struct net_bridge_port *port;
bool change_snoopers = false;
int err = 0;
spin_lock_bh(&br->multicast_lock);
if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
goto unlock;
err = br_mc_disabled_update(br->dev, val, extack);
if (err == -EOPNOTSUPP)
err = 0;
if (err)
goto unlock;
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
change_snoopers = true;
goto unlock;
}
if (!netif_running(br->dev))
goto unlock;
br_multicast_open(br);
list_for_each_entry(port, &br->port_list, list)
__br_multicast_enable_port_ctx(&port->multicast_ctx);
change_snoopers = true;
unlock:
spin_unlock_bh(&br->multicast_lock);
/* br_multicast_join_snoopers has the potential to cause
* an MLD Report/Leave to be delivered to br_multicast_rcv,
* which would in turn call br_multicast_add_group, which would
* attempt to acquire multicast_lock. This function should be
* called after the lock has been released to avoid deadlocks on
* multicast_lock.
*
* br_multicast_leave_snoopers does not have the problem since
* br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
* returns without calling br_multicast_ipv4/6_rcv if it's not
* enabled. Moved both functions out just for symmetry.
*/
if (change_snoopers) {
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_join_snoopers(br);
else
br_multicast_leave_snoopers(br);
}
return err;
}
bool br_multicast_enabled(const struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
bool br_multicast_router(const struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
bool is_router;
spin_lock_bh(&br->multicast_lock);
is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
spin_unlock_bh(&br->multicast_lock);
return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
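/* Usage sketch (illustrative, not part of the original file): a driver that
 * holds a pointer to the bridge netdev could gate its offload decisions on
 * the snooping state exported above, e.g.:
 *
 *	if (netif_is_bridge_master(br_dev) && br_multicast_enabled(br_dev))
 *		my_offload_mdb(br_dev);	// my_offload_mdb() is hypothetical
 */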
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
unsigned long max_delay;
val = !!val;
spin_lock_bh(&brmctx->br->multicast_lock);
if (brmctx->multicast_querier == val)
goto unlock;
WRITE_ONCE(brmctx->multicast_querier, val);
if (!val)
goto unlock;
max_delay = brmctx->multicast_query_response_interval;
if (!timer_pending(&brmctx->ip4_other_query.timer))
brmctx->ip4_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
if (!timer_pending(&brmctx->ip6_other_query.timer))
brmctx->ip6_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif
unlock:
spin_unlock_bh(&brmctx->br->multicast_lock);
return 0;
}
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
unsigned long val)
{
/* Currently we support only version 2 and 3 */
switch (val) {
case 2:
case 3:
break;
default:
return -EINVAL;
}
spin_lock_bh(&brmctx->br->multicast_lock);
brmctx->multicast_igmp_version = val;
spin_unlock_bh(&brmctx->br->multicast_lock);
return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
unsigned long val)
{
/* Currently we support version 1 and 2 */
switch (val) {
case 1:
case 2:
break;
default:
return -EINVAL;
}
spin_lock_bh(&brmctx->br->multicast_lock);
brmctx->multicast_mld_version = val;
spin_unlock_bh(&brmctx->br->multicast_lock);
return 0;
}
#endif
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
unsigned long val)
{
unsigned long intvl_jiffies = clock_t_to_jiffies(val);
if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
br_info(brmctx->br,
"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
}
brmctx->multicast_query_interval = intvl_jiffies;
}
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
unsigned long val)
{
unsigned long intvl_jiffies = clock_t_to_jiffies(val);
if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
br_info(brmctx->br,
"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
}
brmctx->multicast_startup_query_interval = intvl_jiffies;
}
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
* @br_ip_list: The list to store found, snooped multicast IP addresses in
*
* Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
* snooping feature on all bridge ports of dev's bridge device, excluding
* the addresses from dev itself.
*
* Returns the number of items added to br_ip_list.
*
* Notes:
* - br_ip_list needs to be initialized by caller
* - br_ip_list might contain duplicates in the end
* (needs to be taken care of by caller)
* - br_ip_list needs to be freed by caller
*/
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
{
struct net_bridge *br;
struct net_bridge_port *port;
struct net_bridge_port_group *group;
struct br_ip_list *entry;
int count = 0;
rcu_read_lock();
if (!br_ip_list || !netif_is_bridge_port(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
list_for_each_entry_rcu(port, &br->port_list, list) {
if (!port->dev || port->dev == dev)
continue;
hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto unlock;
entry->addr = group->key.addr;
list_add(&entry->list, br_ip_list);
count++;
}
}
unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
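/* Usage sketch (illustrative, not part of the original file): callers must
 * initialize the list themselves and free every entry afterwards, e.g.:
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n = br_multicast_list_adjacent(port_dev, &mc_list);
 *
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		// ... consume entry->addr ...
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */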
/**
* br_multicast_has_querier_anywhere - Checks for a querier on a bridge
* @dev: The bridge port providing the bridge on which to check for a querier
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a valid querier exists anywhere on the bridged link layer.
* Otherwise returns false.
*/
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
struct net_bridge *br;
struct net_bridge_port *port;
struct ethhdr eth;
bool ret = false;
rcu_read_lock();
if (!netif_is_bridge_port(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
memset(&eth, 0, sizeof(eth));
eth.h_proto = htons(proto);
ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
/**
* br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
* @dev: The bridge port adjacent to which to check for a querier
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a selected querier is behind one of the other ports of this
* bridge. Otherwise returns false.
*/
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
struct net_bridge_mcast *brmctx;
struct net_bridge *br;
struct net_bridge_port *port;
bool ret = false;
int port_ifidx;
rcu_read_lock();
if (!netif_is_bridge_port(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
brmctx = &br->multicast_ctx;
switch (proto) {
case ETH_P_IP:
port_ifidx = brmctx->ip4_querier.port_ifidx;
if (!timer_pending(&brmctx->ip4_other_query.timer) ||
port_ifidx == port->dev->ifindex)
goto unlock;
break;
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
port_ifidx = brmctx->ip6_querier.port_ifidx;
if (!timer_pending(&brmctx->ip6_other_query.timer) ||
port_ifidx == port->dev->ifindex)
goto unlock;
break;
#endif
default:
goto unlock;
}
ret = true;
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
/**
* br_multicast_has_router_adjacent - Checks for a router behind a bridge port
* @dev: The bridge port adjacent to which to check for a multicast router
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a multicast router is behind one of the other ports of this
* bridge. Otherwise returns false.
*/
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
struct net_bridge_mcast_port *pmctx;
struct net_bridge_mcast *brmctx;
struct net_bridge_port *port;
bool ret = false;
rcu_read_lock();
port = br_port_get_check_rcu(dev);
if (!port)
goto unlock;
brmctx = &port->br->multicast_ctx;
switch (proto) {
case ETH_P_IP:
hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
ip4_rlist) {
if (pmctx->port == port)
continue;
ret = true;
goto unlock;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
ip6_rlist) {
if (pmctx->port == port)
continue;
ret = true;
goto unlock;
}
break;
#endif
default:
/* when compiled without IPv6 support, be conservative and
* always assume presence of an IPv6 multicast router
*/
ret = true;
}
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
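/* Usage sketch (illustrative, not part of the original file): a protocol on
 * top of a bridge port might combine the helpers above to decide whether
 * IGMP/MLD traffic needs to be forwarded towards this port, e.g.:
 *
 *	if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IP) ||
 *	    br_multicast_has_router_adjacent(port_dev, ETH_P_IP))
 *		forward_ipv4_reports(port_dev);	// hypothetical helper
 */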
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
const struct sk_buff *skb, u8 type, u8 dir)
{
struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
__be16 proto = skb->protocol;
unsigned int t_len;
u64_stats_update_begin(&pstats->syncp);
switch (proto) {
case htons(ETH_P_IP):
t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
switch (type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
pstats->mstats.igmp_v1reports[dir]++;
break;
case IGMPV2_HOST_MEMBERSHIP_REPORT:
pstats->mstats.igmp_v2reports[dir]++;
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
pstats->mstats.igmp_v3reports[dir]++;
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
if (t_len != sizeof(struct igmphdr)) {
pstats->mstats.igmp_v3queries[dir]++;
} else {
unsigned int offset = skb_transport_offset(skb);
struct igmphdr *ih, _ihdr;
ih = skb_header_pointer(skb, offset,
sizeof(_ihdr), &_ihdr);
if (!ih)
break;
if (!ih->code)
pstats->mstats.igmp_v1queries[dir]++;
else
pstats->mstats.igmp_v2queries[dir]++;
}
break;
case IGMP_HOST_LEAVE_MESSAGE:
pstats->mstats.igmp_leaves[dir]++;
break;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
t_len = ntohs(ipv6_hdr(skb)->payload_len) +
sizeof(struct ipv6hdr);
t_len -= skb_network_header_len(skb);
switch (type) {
case ICMPV6_MGM_REPORT:
pstats->mstats.mld_v1reports[dir]++;
break;
case ICMPV6_MLD2_REPORT:
pstats->mstats.mld_v2reports[dir]++;
break;
case ICMPV6_MGM_QUERY:
if (t_len != sizeof(struct mld_msg))
pstats->mstats.mld_v2queries[dir]++;
else
pstats->mstats.mld_v1queries[dir]++;
break;
case ICMPV6_MGM_REDUCTION:
pstats->mstats.mld_leaves[dir]++;
break;
}
break;
#endif /* CONFIG_IPV6 */
}
u64_stats_update_end(&pstats->syncp);
}
void br_multicast_count(struct net_bridge *br,
const struct net_bridge_port *p,
const struct sk_buff *skb, u8 type, u8 dir)
{
struct bridge_mcast_stats __percpu *stats;
/* if multicast_disabled is true then igmp type can't be set */
if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
return;
if (p)
stats = p->mcast_stats;
else
stats = br->mcast_stats;
if (WARN_ON(!stats))
return;
br_mcast_stats_add(stats, skb, type, dir);
}
int br_multicast_init_stats(struct net_bridge *br)
{
br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
if (!br->mcast_stats)
return -ENOMEM;
return 0;
}
void br_multicast_uninit_stats(struct net_bridge *br)
{
free_percpu(br->mcast_stats);
}
/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
void br_multicast_get_stats(const struct net_bridge *br,
const struct net_bridge_port *p,
struct br_mcast_stats *dest)
{
struct bridge_mcast_stats __percpu *stats;
struct br_mcast_stats tdst;
int i;
memset(dest, 0, sizeof(*dest));
if (p)
stats = p->mcast_stats;
else
stats = br->mcast_stats;
if (WARN_ON(!stats))
return;
memset(&tdst, 0, sizeof(tdst));
for_each_possible_cpu(i) {
struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
struct br_mcast_stats temp;
unsigned int start;
do {
start = u64_stats_fetch_begin(&cpu_stats->syncp);
memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
tdst.igmp_parse_errors += temp.igmp_parse_errors;
mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
tdst.mld_parse_errors += temp.mld_parse_errors;
}
memcpy(dest, &tdst, sizeof(*dest));
}
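/* Illustrative sketch, not part of the original file: the general shape of
 * the per-CPU snapshot loop used by br_multicast_get_stats() above. The
 * names example_counter and example_read_packets are hypothetical;
 * for_each_possible_cpu(), per_cpu_ptr() and the u64_stats_* helpers are the
 * same primitives used above.
 */
struct example_counter {
	u64 packets;
	struct u64_stats_sync syncp;
};

static inline u64 example_read_packets(struct example_counter __percpu *ctrs)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct example_counter *c = per_cpu_ptr(ctrs, cpu);
		unsigned int start;
		u64 val;

		/* Retry if a writer updated the counter while we read it. */
		do {
			start = u64_stats_fetch_begin(&c->syncp);
			val = c->packets;
		} while (u64_stats_fetch_retry(&c->syncp, start));
		total += val;
	}
	return total;
}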
int br_mdb_hash_init(struct net_bridge *br)
{
int err;
err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
if (err)
return err;
err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
if (err) {
rhashtable_destroy(&br->sg_port_tbl);
return err;
}
return 0;
}
void br_mdb_hash_fini(struct net_bridge *br)
{
rhashtable_destroy(&br->sg_port_tbl);
rhashtable_destroy(&br->mdb_hash_tbl);
}
| linux-master | net/bridge/br_multicast.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Spanning tree protocol; interface code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_stp.h"
/* Port id is composed of priority and port number.
* NB: some bits of priority are dropped to
* make room for more ports.
*/
static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
{
return ((u16)priority << BR_PORT_BITS)
| (port_no & ((1<<BR_PORT_BITS)-1));
}
#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
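/* Illustrative sketch, not part of the original file: a worked example of the
 * packing above. BR_PORT_BITS comes from br_private.h; the value 10 in the
 * comment below is an assumption used only for the arithmetic.
 */
static inline void example_port_id_packing(void)
{
	/* priority 0x20 in the upper bits, port number 5 in the lower bits;
	 * with BR_PORT_BITS == 10 this yields (0x20 << 10) | 5 = 0x8005.
	 */
	port_id id = br_make_port_id(0x20, 5);

	(void)id;
}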
/* called under bridge lock */
void br_init_port(struct net_bridge_port *p)
{
int err;
p->port_id = br_make_port_id(p->priority, p->port_no);
br_become_designated_port(p);
br_set_state(p, BR_STATE_BLOCKING);
p->topology_change_ack = 0;
p->config_pending = 0;
err = __set_ageing_time(p->dev, p->br->ageing_time);
if (err)
netdev_err(p->dev, "failed to offload ageing time\n");
}
/* NO locks held */
void br_stp_enable_bridge(struct net_bridge *br)
{
struct net_bridge_port *p;
spin_lock_bh(&br->lock);
if (br->stp_enabled == BR_KERNEL_STP)
mod_timer(&br->hello_timer, jiffies + br->hello_time);
mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
if (netif_running(p->dev) && netif_oper_up(p->dev))
br_stp_enable_port(p);
}
spin_unlock_bh(&br->lock);
}
/* NO locks held */
void br_stp_disable_bridge(struct net_bridge *br)
{
struct net_bridge_port *p;
spin_lock_bh(&br->lock);
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED)
br_stp_disable_port(p);
}
__br_set_topology_change(br, 0);
br->topology_change_detected = 0;
spin_unlock_bh(&br->lock);
del_timer_sync(&br->hello_timer);
del_timer_sync(&br->topology_change_timer);
del_timer_sync(&br->tcn_timer);
cancel_delayed_work_sync(&br->gc_work);
}
/* called under bridge lock */
void br_stp_enable_port(struct net_bridge_port *p)
{
br_init_port(p);
br_port_state_selection(p->br);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
}
/* called under bridge lock */
void br_stp_disable_port(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
int wasroot;
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
br_set_state(p, BR_STATE_DISABLED);
p->topology_change_ack = 0;
p->config_pending = 0;
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
del_timer(&p->message_age_timer);
del_timer(&p->forward_delay_timer);
del_timer(&p->hold_timer);
if (!rcu_access_pointer(p->backup_port))
br_fdb_delete_by_port(br, p, 0, 0);
br_multicast_disable_port(p);
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
}
static int br_stp_call_user(struct net_bridge *br, char *arg)
{
char *argv[] = { BR_STP_PROG, br->dev->name, arg, NULL };
char *envp[] = { NULL };
int rc;
/* call userspace STP and report program errors */
rc = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
if (rc > 0) {
if (rc & 0xff)
br_debug(br, BR_STP_PROG " received signal %d\n",
rc & 0x7f);
else
br_debug(br, BR_STP_PROG " exited with code %d\n",
(rc >> 8) & 0xff);
}
return rc;
}
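/* Illustrative sketch, not part of the original file: with UMH_WAIT_PROC the
 * positive return value of call_usermodehelper() follows the wait(2) status
 * encoding, which the helper above unpacks. example_decode_umh_status() is a
 * hypothetical name used only for illustration.
 */
static inline void example_decode_umh_status(int rc, int *sig, int *exit_code)
{
	*sig = rc & 0x7f;		/* non-zero when killed by a signal */
	*exit_code = (rc >> 8) & 0xff;	/* exit status for a normal exit */
}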
static void br_stp_start(struct net_bridge *br)
{
int err = -ENOENT;
if (net_eq(dev_net(br->dev), &init_net))
err = br_stp_call_user(br, "start");
if (err && err != -ENOENT)
br_err(br, "failed to start userspace STP (%d)\n", err);
spin_lock_bh(&br->lock);
if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
if (!err) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
} else {
br->stp_enabled = BR_KERNEL_STP;
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
if (br->dev->flags & IFF_UP)
mod_timer(&br->hello_timer, jiffies + br->hello_time);
br_port_state_selection(br);
}
spin_unlock_bh(&br->lock);
}
static void br_stp_stop(struct net_bridge *br)
{
int err;
if (br->stp_enabled == BR_USER_STP) {
err = br_stp_call_user(br, "stop");
if (err)
br_err(br, "failed to stop userspace STP (%d)\n", err);
/* To start timers on any ports left in blocking */
spin_lock_bh(&br->lock);
br_port_state_selection(br);
spin_unlock_bh(&br->lock);
}
br->stp_enabled = BR_NO_STP;
}
int br_stp_set_enabled(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
ASSERT_RTNL();
if (br_mrp_enabled(br)) {
NL_SET_ERR_MSG_MOD(extack,
"STP can't be enabled if MRP is already enabled");
return -EINVAL;
}
if (val) {
if (br->stp_enabled == BR_NO_STP)
br_stp_start(br);
} else {
if (br->stp_enabled != BR_NO_STP)
br_stp_stop(br);
}
return 0;
}
/* called under bridge lock */
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
{
/* should be aligned on 2 bytes for ether_addr_equal() */
unsigned short oldaddr_aligned[ETH_ALEN >> 1];
unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
struct net_bridge_port *p;
int wasroot;
wasroot = br_is_root_bridge(br);
br_fdb_change_mac_address(br, addr);
memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
memcpy(br->bridge_id.addr, addr, ETH_ALEN);
eth_hw_addr_set(br->dev, addr);
list_for_each_entry(p, &br->port_list, list) {
if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
if (ether_addr_equal(p->designated_root.addr, oldaddr))
memcpy(p->designated_root.addr, addr, ETH_ALEN);
}
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
}
/* should be aligned on 2 bytes for ether_addr_equal() */
static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
bool br_stp_recalculate_bridge_id(struct net_bridge *br)
{
const unsigned char *br_mac_zero =
(const unsigned char *)br_mac_zero_aligned;
const unsigned char *addr = br_mac_zero;
struct net_bridge_port *p;
/* user has chosen a value so keep it */
if (br->dev->addr_assign_type == NET_ADDR_SET)
return false;
list_for_each_entry(p, &br->port_list, list) {
if (addr == br_mac_zero ||
memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
addr = p->dev->dev_addr;
}
if (ether_addr_equal(br->bridge_id.addr, addr))
return false; /* no change */
br_stp_change_bridge_id(br, addr);
return true;
}
/* Acquires and releases bridge lock */
void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
{
struct net_bridge_port *p;
int wasroot;
spin_lock_bh(&br->lock);
wasroot = br_is_root_bridge(br);
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_is_designated_port(p)) {
p->designated_bridge.prio[0] = (newprio >> 8) & 0xFF;
p->designated_bridge.prio[1] = newprio & 0xFF;
}
}
br->bridge_id.prio[0] = (newprio >> 8) & 0xFF;
br->bridge_id.prio[1] = newprio & 0xFF;
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
spin_unlock_bh(&br->lock);
}
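/* Illustrative sketch, not part of the original file: the same big-endian
 * split of the 16-bit priority used by br_stp_set_bridge_priority() above,
 * e.g. newprio 0x8000 becomes prio[0] = 0x80 and prio[1] = 0x00, which is
 * what br_show_bridge_id() below prints in front of the MAC address.
 * example_set_bridge_prio() is a hypothetical helper.
 */
static inline void example_set_bridge_prio(struct bridge_id *id, u16 newprio)
{
	id->prio[0] = (newprio >> 8) & 0xFF;
	id->prio[1] = newprio & 0xFF;
}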
/* called under bridge lock */
int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
{
port_id new_port_id;
if (newprio > BR_MAX_PORT_PRIORITY)
return -ERANGE;
new_port_id = br_make_port_id(newprio, p->port_no);
if (br_is_designated_port(p))
p->designated_port = new_port_id;
p->port_id = new_port_id;
p->priority = newprio;
if (!memcmp(&p->br->bridge_id, &p->designated_bridge, 8) &&
p->port_id < p->designated_port) {
br_become_designated_port(p);
br_port_state_selection(p->br);
}
return 0;
}
/* called under bridge lock */
int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
{
if (path_cost < BR_MIN_PATH_COST ||
path_cost > BR_MAX_PATH_COST)
return -ERANGE;
p->flags |= BR_ADMIN_COST;
p->path_cost = path_cost;
br_configuration_update(p->br);
br_port_state_selection(p->br);
return 0;
}
ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
{
return sprintf(buf, "%.2x%.2x.%.2x%.2x%.2x%.2x%.2x%.2x\n",
id->prio[0], id->prio[1],
id->addr[0], id->addr[1], id->addr[2],
id->addr[3], id->addr[4], id->addr[5]);
}
| linux-master | net/bridge/br_stp_if.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bridge per vlan tunnel port dst_metadata netlink control interface
*
* Authors:
* Roopa Prabhu <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>
#include <net/dst_metadata.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static size_t __get_vlan_tinfo_size(void)
{
return nla_total_size(0) + /* nest IFLA_BRIDGE_VLAN_TUNNEL_INFO */
nla_total_size(sizeof(u32)) + /* IFLA_BRIDGE_VLAN_TUNNEL_ID */
nla_total_size(sizeof(u16)) + /* IFLA_BRIDGE_VLAN_TUNNEL_VID */
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
}
bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *v_last)
{
__be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id);
__be32 tunid_last = tunnel_id_to_key32(v_last->tinfo.tunnel_id);
return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_last)) == 1;
}
static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *v, *vtbegin = NULL, *vtend = NULL;
int num_tinfos = 0;
/* Count number of vlan infos */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v) || !v->tinfo.tunnel_id)
continue;
if (!vtbegin) {
goto initvars;
} else if ((v->vid - vtend->vid) == 1 &&
vlan_tunid_inrange(v, vtend)) {
vtend = v;
continue;
} else {
if ((vtend->vid - vtbegin->vid) > 0)
num_tinfos += 2;
else
num_tinfos += 1;
}
initvars:
vtbegin = v;
vtend = v;
}
if (vtbegin && vtend) {
if ((vtend->vid - vtbegin->vid) > 0)
num_tinfos += 2;
else
num_tinfos += 1;
}
return num_tinfos;
}
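/* Illustrative example, not part of the original file: with vlans 10..14 all
 * bound to consecutive tunnel ids 1010..1014, the loop above counts the run
 * as two entries (a RANGE_BEGIN plus a RANGE_END); a lone vlan 20 bound to
 * tunnel id 2020 adds one more, for a total of three netlink entries.
 */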
int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg)
{
int num_tinfos;
if (!vg)
return 0;
rcu_read_lock();
num_tinfos = __get_num_vlan_tunnel_infos(vg);
rcu_read_unlock();
return num_tinfos * __get_vlan_tinfo_size();
}
static int br_fill_vlan_tinfo(struct sk_buff *skb, u16 vid,
__be64 tunnel_id, u16 flags)
{
__be32 tid = tunnel_id_to_key32(tunnel_id);
struct nlattr *tmap;
tmap = nla_nest_start_noflag(skb, IFLA_BRIDGE_VLAN_TUNNEL_INFO);
if (!tmap)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_BRIDGE_VLAN_TUNNEL_ID,
be32_to_cpu(tid)))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_VID,
vid))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
flags))
goto nla_put_failure;
nla_nest_end(skb, tmap);
return 0;
nla_put_failure:
nla_nest_cancel(skb, tmap);
return -EMSGSIZE;
}
static int br_fill_vlan_tinfo_range(struct sk_buff *skb,
struct net_bridge_vlan *vtbegin,
struct net_bridge_vlan *vtend)
{
int err;
if (vtend && (vtend->vid - vtbegin->vid) > 0) {
/* add range to skb */
err = br_fill_vlan_tinfo(skb, vtbegin->vid,
vtbegin->tinfo.tunnel_id,
BRIDGE_VLAN_INFO_RANGE_BEGIN);
if (err)
return err;
err = br_fill_vlan_tinfo(skb, vtend->vid,
vtend->tinfo.tunnel_id,
BRIDGE_VLAN_INFO_RANGE_END);
if (err)
return err;
} else {
err = br_fill_vlan_tinfo(skb, vtbegin->vid,
vtbegin->tinfo.tunnel_id,
0);
if (err)
return err;
}
return 0;
}
int br_fill_vlan_tunnel_info(struct sk_buff *skb,
struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vtbegin = NULL;
struct net_bridge_vlan *vtend = NULL;
struct net_bridge_vlan *v;
int err;
/* Walk the vlan list and fill tunnel info attributes, compressing
 * contiguous entries into ranges.
 */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v))
continue;
if (!v->tinfo.tunnel_dst)
continue;
if (!vtbegin) {
goto initvars;
} else if ((v->vid - vtend->vid) == 1 &&
vlan_tunid_inrange(v, vtend)) {
vtend = v;
continue;
} else {
err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
if (err)
return err;
}
initvars:
vtbegin = v;
vtend = v;
}
if (vtbegin) {
err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
if (err)
return err;
}
return 0;
}
static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1] = {
[IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC] = {
.strict_start_type = IFLA_BRIDGE_VLAN_TUNNEL_FLAGS + 1
},
[IFLA_BRIDGE_VLAN_TUNNEL_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_VLAN_TUNNEL_VID] = { .type = NLA_U16 },
[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
};
int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
u16 vid, u32 tun_id, bool *changed)
{
int err = 0;
if (!p)
return -EINVAL;
switch (cmd) {
case RTM_SETLINK:
err = nbp_vlan_tunnel_info_add(p, vid, tun_id);
if (!err)
*changed = true;
break;
case RTM_DELLINK:
if (!nbp_vlan_tunnel_info_delete(p, vid))
*changed = true;
break;
}
return err;
}
int br_parse_vlan_tunnel_info(struct nlattr *attr,
struct vtunnel_info *tinfo)
{
struct nlattr *tb[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1];
u32 tun_id;
u16 vid, flags = 0;
int err;
memset(tinfo, 0, sizeof(*tinfo));
err = nla_parse_nested_deprecated(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX,
attr, vlan_tunnel_policy, NULL);
if (err < 0)
return err;
if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] ||
!tb[IFLA_BRIDGE_VLAN_TUNNEL_VID])
return -EINVAL;
tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]);
vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]);
if (vid >= VLAN_VID_MASK)
return -ERANGE;
if (tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS])
flags = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]);
tinfo->tunid = tun_id;
tinfo->vid = vid;
tinfo->flags = flags;
return 0;
}
/* send a notification if v_curr can't enter the range and start a new one */
static void __vlan_tunnel_handle_range(const struct net_bridge_port *p,
struct net_bridge_vlan **v_start,
struct net_bridge_vlan **v_end,
int v_curr, bool curr_change)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
vg = nbp_vlan_group(p);
if (!vg)
return;
v = br_vlan_find(vg, v_curr);
if (!*v_start)
goto out_init;
if (v && curr_change && br_vlan_can_enter_range(v, *v_end)) {
*v_end = v;
return;
}
br_vlan_notify(p->br, p, (*v_start)->vid, (*v_end)->vid, RTM_NEWVLAN);
out_init:
/* we start a range only if there are any changes to notify about */
*v_start = curr_change ? v : NULL;
*v_end = *v_start;
}
int br_process_vlan_tunnel_info(const struct net_bridge *br,
const struct net_bridge_port *p, int cmd,
struct vtunnel_info *tinfo_curr,
struct vtunnel_info *tinfo_last,
bool *changed)
{
int err;
if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
if (tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN)
return -EINVAL;
memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info));
} else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) {
struct net_bridge_vlan *v_start = NULL, *v_end = NULL;
int t, v;
if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN))
return -EINVAL;
if ((tinfo_curr->vid - tinfo_last->vid) !=
(tinfo_curr->tunid - tinfo_last->tunid))
return -EINVAL;
t = tinfo_last->tunid;
for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) {
bool curr_change = false;
err = br_vlan_tunnel_info(p, cmd, v, t, &curr_change);
if (err)
break;
t++;
if (curr_change)
*changed = curr_change;
__vlan_tunnel_handle_range(p, &v_start, &v_end, v,
curr_change);
}
if (v_start && v_end)
br_vlan_notify(br, p, v_start->vid, v_end->vid,
RTM_NEWVLAN);
if (err)
return err;
memset(tinfo_last, 0, sizeof(struct vtunnel_info));
memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
} else {
if (tinfo_last->flags)
return -EINVAL;
err = br_vlan_tunnel_info(p, cmd, tinfo_curr->vid,
tinfo_curr->tunid, changed);
if (err)
return err;
br_vlan_notify(br, p, tinfo_curr->vid, 0, RTM_NEWVLAN);
memset(tinfo_last, 0, sizeof(struct vtunnel_info));
memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
}
return 0;
}
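/* Illustrative example, not part of the original file: a RANGE_BEGIN entry of
 * {vid 10, tunid 1010} followed by a RANGE_END entry of {vid 12, tunid 1012}
 * makes the loop above call br_vlan_tunnel_info() three times, for
 * (10, 1010), (11, 1011) and (12, 1012), notifying afterwards about the
 * sub-range(s) that actually changed.
 */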
| linux-master | net/bridge/br_netlink_tunnel.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct net_bridge_vlan *vle = ptr;
u16 vid = *(u16 *)arg->key;
return vle->vid != vid;
}
static const struct rhashtable_params br_vlan_rht_params = {
.head_offset = offsetof(struct net_bridge_vlan, vnode),
.key_offset = offsetof(struct net_bridge_vlan, vid),
.key_len = sizeof(u16),
.nelem_hint = 3,
.max_size = VLAN_N_VID,
.obj_cmpfn = br_vlan_cmp,
.automatic_shrinking = true,
};
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
const struct net_bridge_vlan *v)
{
if (vg->pvid == v->vid)
return;
smp_wmb();
br_vlan_set_pvid_state(vg, v->state);
vg->pvid = v->vid;
}
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
if (vg->pvid != vid)
return;
smp_wmb();
vg->pvid = 0;
}
/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, only return whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change to @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
bool commit)
{
struct net_bridge_vlan_group *vg;
bool change;
if (br_vlan_is_master(v))
vg = br_vlan_group(v->br);
else
vg = nbp_vlan_group(v->port);
/* check if anything would be changed on commit */
change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);
if (!commit)
goto out;
if (flags & BRIDGE_VLAN_INFO_PVID)
__vlan_add_pvid(vg, v);
else
__vlan_delete_pvid(vg, v->vid);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
else
v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
out:
return change;
}
static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
{
return __vlan_flags_update(v, flags, false);
}
static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
{
__vlan_flags_update(v, flags, true);
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
struct net_bridge_vlan *v, u16 flags,
struct netlink_ext_ack *extack)
{
int err;
/* Try the switchdev op first. If it is not supported, fall back to
 * an 8021q add.
 */
err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
if (err == -EOPNOTSUPP)
return vlan_vid_add(dev, br->vlan_proto, v->vid);
v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
return err;
}
static void __vlan_add_list(struct net_bridge_vlan *v)
{
struct net_bridge_vlan_group *vg;
struct list_head *headp, *hpos;
struct net_bridge_vlan *vent;
if (br_vlan_is_master(v))
vg = br_vlan_group(v->br);
else
vg = nbp_vlan_group(v->port);
headp = &vg->vlan_list;
list_for_each_prev(hpos, headp) {
vent = list_entry(hpos, struct net_bridge_vlan, vlist);
if (v->vid >= vent->vid)
break;
}
list_add_rcu(&v->vlist, hpos);
}
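/* The walk above goes from the tail towards the head and inserts the new
 * vlan right after the first entry whose vid is not larger, keeping
 * vg->vlan_list sorted by vid in ascending order.
 */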
static void __vlan_del_list(struct net_bridge_vlan *v)
{
list_del_rcu(&v->vlist);
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
const struct net_bridge_vlan *v)
{
int err;
/* Try the switchdev op first. If it is not supported, fall back to
 * an 8021q del.
 */
err = br_switchdev_port_vlan_del(dev, v->vid);
if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
vlan_vid_del(dev, br->vlan_proto, v->vid);
return err == -EOPNOTSUPP ? 0 : err;
}
/* Returns the master vlan; if it doesn't exist yet, it is created. In all
 * cases a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *masterv;
vg = br_vlan_group(br);
masterv = br_vlan_find(vg, vid);
if (!masterv) {
bool changed;
/* missing global ctx, create it now */
if (br_vlan_add(br, vid, 0, &changed, extack))
return NULL;
masterv = br_vlan_find(vg, vid);
if (WARN_ON(!masterv))
return NULL;
refcount_set(&masterv->refcnt, 1);
return masterv;
}
refcount_inc(&masterv->refcnt);
return masterv;
}
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
struct net_bridge_vlan *v;
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(!br_vlan_is_master(v));
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
}
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
struct net_bridge_vlan_group *vg;
if (!br_vlan_is_master(masterv))
return;
vg = br_vlan_group(masterv->br);
if (refcount_dec_and_test(&masterv->refcnt)) {
rhashtable_remove_fast(&vg->vlan_hash,
&masterv->vnode, br_vlan_rht_params);
__vlan_del_list(masterv);
br_multicast_toggle_one_vlan(masterv, false);
br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
}
}
static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
struct net_bridge_vlan *v;
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(br_vlan_is_master(v));
/* if we had per-port stats configured then free them here */
if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
}
static void br_vlan_init_state(struct net_bridge_vlan *v)
{
struct net_bridge *br;
if (br_vlan_is_master(v))
br = v->br;
else
br = v->port->br;
if (br_opt_get(br, BROPT_MST_ENABLED)) {
br_mst_vlan_init_state(v);
return;
}
v->state = BR_STATE_FORWARDING;
v->msti = 0;
}
/* This is the shared VLAN add function which works for both ports and bridge
* devices. There are four possible calls to this function in terms of the
* vlan entry type:
* 1. vlan is being added on a port (no master flags, global entry exists)
* 2. vlan is being added on a bridge (both master and brentry flags)
* 3. vlan is being added on a port, but a global entry didn't exist which
* is being created right now (master flag set, brentry flag unset), the
* global entry is used for global per-vlan features, but not for filtering
* 4. same as 3 but with both master and brentry flags set so the entry
* will be used for filtering in both the port and the bridge
*/
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *masterv = NULL;
struct net_bridge_port *p = NULL;
struct net_bridge_vlan_group *vg;
struct net_device *dev;
struct net_bridge *br;
int err;
if (br_vlan_is_master(v)) {
br = v->br;
dev = br->dev;
vg = br_vlan_group(br);
} else {
p = v->port;
br = p->br;
dev = p->dev;
vg = nbp_vlan_group(p);
}
if (p) {
/* Add VLAN to the device filter if it is supported.
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
*/
err = __vlan_vid_add(dev, br, v, flags, extack);
if (err)
goto out;
/* need to work on the master vlan too */
if (flags & BRIDGE_VLAN_INFO_MASTER) {
bool changed;
err = br_vlan_add(br, v->vid,
flags | BRIDGE_VLAN_INFO_BRENTRY,
&changed, extack);
if (err)
goto out_filt;
if (changed)
br_vlan_notify(br, NULL, v->vid, 0,
RTM_NEWVLAN);
}
masterv = br_vlan_get_master(br, v->vid, extack);
if (!masterv) {
err = -ENOMEM;
goto out_filt;
}
v->brvlan = masterv;
if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
v->stats =
netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!v->stats) {
err = -ENOMEM;
goto out_filt;
}
v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
} else {
v->stats = masterv->stats;
}
br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
} else {
if (br_vlan_should_use(v)) {
err = br_switchdev_port_vlan_add(dev, v->vid, flags,
false, extack);
if (err && err != -EOPNOTSUPP)
goto out;
}
br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
}
/* Add the dev mac and count the vlan only if it's usable */
if (br_vlan_should_use(v)) {
err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
if (err) {
br_err(br, "failed insert local address into bridge forwarding table\n");
goto out_filt;
}
vg->num_vlans++;
}
/* set the state before publishing */
br_vlan_init_state(v);
err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params);
if (err)
goto out_fdb_insert;
__vlan_add_list(v);
__vlan_flags_commit(v, flags);
br_multicast_toggle_one_vlan(v, true);
if (p)
nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
return err;
out_fdb_insert:
if (br_vlan_should_use(v)) {
br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
vg->num_vlans--;
}
out_filt:
if (p) {
__vlan_vid_del(dev, br, v);
if (masterv) {
if (v->stats && masterv->stats != v->stats)
free_percpu(v->stats);
v->stats = NULL;
br_vlan_put_master(masterv);
v->brvlan = NULL;
}
} else {
br_switchdev_port_vlan_del(dev, v->vid);
}
goto out;
}
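/* Illustrative mapping, not part of the original file, of the cases listed in
 * the comment above __vlan_add() to their callers further down in this file:
 * case 1 corresponds to nbp_vlan_add() on a port, case 2 to br_vlan_add() on
 * the bridge with BRIDGE_VLAN_INFO_BRENTRY set (the MASTER flag is added
 * internally), and the global context of case 3 is created implicitly by
 * br_vlan_get_master() when a port vlan is added before its bridge vlan
 * exists.
 */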
static int __vlan_del(struct net_bridge_vlan *v)
{
struct net_bridge_vlan *masterv = v;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
int err = 0;
if (br_vlan_is_master(v)) {
vg = br_vlan_group(v->br);
} else {
p = v->port;
vg = nbp_vlan_group(v->port);
masterv = v->brvlan;
}
__vlan_delete_pvid(vg, v->vid);
if (p) {
err = __vlan_vid_del(p->dev, p->br, v);
if (err)
goto out;
} else {
err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
if (err && err != -EOPNOTSUPP)
goto out;
err = 0;
}
if (br_vlan_should_use(v)) {
v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
vg->num_vlans--;
}
if (masterv != v) {
vlan_tunnel_info_del(vg, v);
rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params);
__vlan_del_list(v);
nbp_vlan_set_vlan_dev_state(p, v->vid);
br_multicast_toggle_one_vlan(v, false);
br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
call_rcu(&v->rcu, nbp_vlan_rcu_free);
}
br_vlan_put_master(masterv);
out:
return err;
}
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
WARN_ON(!list_empty(&vg->vlan_list));
rhashtable_destroy(&vg->vlan_hash);
vlan_tunnel_deinit(vg);
kfree(vg);
}
static void __vlan_flush(const struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vlan, *tmp;
u16 v_start = 0, v_end = 0;
int err;
__vlan_delete_pvid(vg, vg->pvid);
list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
/* take care of disjoint ranges */
if (!v_start) {
v_start = vlan->vid;
} else if (vlan->vid - v_end != 1) {
/* found range end, notify and start next one */
br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
v_start = vlan->vid;
}
v_end = vlan->vid;
err = __vlan_del(vlan);
if (err) {
br_err(br,
"port %u(%s) failed to delete vlan %d: %pe\n",
(unsigned int) p->port_no, p->dev->name,
vlan->vid, ERR_PTR(err));
}
}
/* notify about the last/whole vlan range */
if (v_start)
br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
struct pcpu_sw_netstats *stats;
struct net_bridge_vlan *v;
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
goto out;
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id has untagged flag set,
* send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
v = br_vlan_find(vg, vid);
/* A vlan entry must be configured at this point. The only exception
 * is when the bridge is set in promiscuous mode and the packet is
 * destined for the bridge device itself. In that case pass the
 * packet as is.
 */
if (!v || !br_vlan_should_use(v)) {
if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
goto out;
} else {
kfree_skb(skb);
return NULL;
}
}
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
u64_stats_add(&stats->tx_bytes, skb->len);
u64_stats_inc(&stats->tx_packets);
u64_stats_update_end(&stats->syncp);
}
/* If the skb will be sent using forwarding offload, the assumption is
* that the switchdev will inject the packet into hardware together
* with the bridge VLAN, so that it can be forwarded according to that
* VLAN. The switchdev should deal with popping the VLAN header in
* hardware on each egress port as appropriate. So only strip the VLAN
* header if forwarding offload is not being used.
*/
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
!br_switchdev_frame_uses_tx_fwd_offload(skb))
__vlan_hwaccel_clear_tag(skb);
if (p && (p->flags & BR_VLAN_TUNNEL) &&
br_handle_egress_vlan_tunnel(skb, v)) {
kfree_skb(skb);
return NULL;
}
out:
return skb;
}
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb, u16 *vid,
u8 *state,
struct net_bridge_vlan **vlan)
{
struct pcpu_sw_netstats *stats;
struct net_bridge_vlan *v;
bool tagged;
BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
/* If vlan tx offload is disabled on the bridge device and the frame
 * was sent from a vlan device on top of the bridge device, it does
 * not have a HW accelerated vlan tag.
 */
if (unlikely(!skb_vlan_tag_present(skb) &&
skb->protocol == br->vlan_proto)) {
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
return false;
}
if (!br_vlan_get_tag(skb, vid)) {
/* Tagged frame */
if (skb->vlan_proto != br->vlan_proto) {
/* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (unlikely(!skb))
return false;
skb_pull(skb, ETH_HLEN);
skb_reset_mac_len(skb);
*vid = 0;
tagged = false;
} else {
tagged = true;
}
} else {
/* Untagged frame */
tagged = false;
}
if (!*vid) {
u16 pvid = br_get_pvid(vg);
/* Frame had a tag with VID 0 or did not have a tag.
* See if pvid is set on this port. That tells us which
* vlan untagged or priority-tagged traffic belongs to.
*/
if (!pvid)
goto drop;
/* PVID is set on this port. Any untagged or priority-tagged
* ingress frame is considered to belong to this vlan.
*/
*vid = pvid;
if (likely(!tagged))
/* Untagged Frame. */
__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
else
/* Priority-tagged Frame.
* At this point, we know that skb->vlan_tci VID
* field was 0.
* We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
/* if snooping and stats are disabled we can avoid the lookup */
if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
if (*state == BR_STATE_FORWARDING) {
*state = br_vlan_get_pvid_state(vg);
if (!br_vlan_state_allowed(*state, true))
goto drop;
}
return true;
}
}
v = br_vlan_find(vg, *vid);
if (!v || !br_vlan_should_use(v))
goto drop;
if (*state == BR_STATE_FORWARDING) {
*state = br_vlan_get_state(v);
if (!br_vlan_state_allowed(*state, true))
goto drop;
}
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
u64_stats_add(&stats->rx_bytes, skb->len);
u64_stats_inc(&stats->rx_packets);
u64_stats_update_end(&stats->syncp);
}
*vlan = v;
return true;
drop:
kfree_skb(skb);
return false;
}
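/* Illustrative example, not part of the original file: a priority-tagged
 * frame (VID 0, non-zero PCP) arriving on a port whose pvid is 100 leaves
 * __allowed_ingress() with the VID field of skb->vlan_tci set to 100 while
 * the PCP bits are preserved, exactly as the comment above the
 * "skb->vlan_tci |= pvid" line describes.
 */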
bool br_allowed_ingress(const struct net_bridge *br,
struct net_bridge_vlan_group *vg, struct sk_buff *skb,
u16 *vid, u8 *state,
struct net_bridge_vlan **vlan)
{
/* If VLAN filtering is disabled on the bridge, all packets are
* permitted.
*/
*vlan = NULL;
if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
return true;
}
return __allowed_ingress(br, vg, skb, vid, state, vlan);
}
/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
const struct sk_buff *skb)
{
const struct net_bridge_vlan *v;
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
return true;
br_vlan_get_tag(skb, &vid);
v = br_vlan_find(vg, vid);
if (v && br_vlan_should_use(v) &&
br_vlan_state_allowed(br_vlan_get_state(v), false))
return true;
return false;
}
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
/* If filtering was disabled at input, let it pass. */
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return true;
vg = nbp_vlan_group_rcu(p);
if (!vg || !vg->num_vlans)
return false;
if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
*vid = 0;
if (!*vid) {
*vid = br_get_pvid(vg);
if (!*vid ||
!br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
return false;
return true;
}
v = br_vlan_find(vg, *vid);
if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
return true;
return false;
}
static int br_vlan_add_existing(struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan,
u16 flags, bool *changed,
struct netlink_ext_ack *extack)
{
bool would_change = __vlan_flags_would_change(vlan, flags);
bool becomes_brentry = false;
int err;
if (!br_vlan_is_brentry(vlan)) {
/* Trying to change flags of non-existent bridge vlan */
if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
return -EINVAL;
becomes_brentry = true;
}
/* Master VLANs that aren't brentries weren't notified before,
* time to notify them now.
*/
if (becomes_brentry || would_change) {
err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
would_change, extack);
if (err && err != -EOPNOTSUPP)
return err;
}
if (becomes_brentry) {
/* It was only kept for port vlans, now make it real */
err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
if (err) {
br_err(br, "failed to insert local address into bridge forwarding table\n");
goto err_fdb_insert;
}
refcount_inc(&vlan->refcnt);
vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
vg->num_vlans++;
*changed = true;
br_multicast_toggle_one_vlan(vlan, true);
}
__vlan_flags_commit(vlan, flags);
if (would_change)
*changed = true;
return 0;
err_fdb_insert:
br_switchdev_port_vlan_del(br->dev, vlan->vid);
return err;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
* changed must be true only if the vlan was created or updated
*/
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
int ret;
ASSERT_RTNL();
*changed = false;
vg = br_vlan_group(br);
vlan = br_vlan_find(vg, vid);
if (vlan)
return br_vlan_add_existing(br, vg, vlan, flags, changed,
extack);
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return -ENOMEM;
vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vlan->stats) {
kfree(vlan);
return -ENOMEM;
}
vlan->vid = vid;
vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
vlan->br = br;
if (flags & BRIDGE_VLAN_INFO_BRENTRY)
refcount_set(&vlan->refcnt, 1);
ret = __vlan_add(vlan, flags, extack);
if (ret) {
free_percpu(vlan->stats);
kfree(vlan);
} else {
*changed = true;
}
return ret;
}
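/* Illustrative usage sketch, not part of the original file: creating vlan 100
 * as a proper bridge entry under rtnl. example_add_bridge_vlan() is a
 * hypothetical helper; the flags and the NULL extack broadly mirror how
 * br_vlan_bridge_event() below calls br_vlan_add().
 */
static inline int example_add_bridge_vlan(struct net_bridge *br)
{
	bool changed;

	ASSERT_RTNL();
	return br_vlan_add(br, 100,
			   BRIDGE_VLAN_INFO_UNTAGGED |
			   BRIDGE_VLAN_INFO_BRENTRY,
			   &changed, NULL);
}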
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
ASSERT_RTNL();
vg = br_vlan_group(br);
v = br_vlan_find(vg, vid);
if (!v || !br_vlan_is_brentry(v))
return -ENOENT;
br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
br_fdb_delete_by_port(br, NULL, vid, 0);
vlan_tunnel_info_del(vg, v);
return __vlan_del(v);
}
void br_vlan_flush(struct net_bridge *br)
{
struct net_bridge_vlan_group *vg;
ASSERT_RTNL();
vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
synchronize_rcu();
__vlan_group_free(vg);
}
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
if (!vg)
return NULL;
return br_vlan_lookup(&vg->vlan_hash, vid);
}
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
return;
spin_lock_bh(&br->lock);
if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
br->vlan_proto == htons(ETH_P_8021Q)) {
/* Bridge Group Address */
br->group_addr[5] = 0x00;
} else { /* vlan_enabled && ETH_P_8021AD */
/* Provider Bridge Group Address */
br->group_addr[5] = 0x08;
}
spin_unlock_bh(&br->lock);
}
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
br->vlan_proto == htons(ETH_P_8021Q))
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
else /* vlan_enabled && ETH_P_8021AD */
br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
~(1u << br->group_addr[5]);
}
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = br->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
.u.vlan_filtering = val,
};
int err;
if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
return 0;
br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
err = switchdev_port_attr_set(br->dev, &attr, extack);
if (err && err != -EOPNOTSUPP) {
br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
return err;
}
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
br_multicast_toggle_vlan_snooping(br, false, NULL);
}
return 0;
}
bool br_vlan_enabled(const struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
struct net_bridge *br = netdev_priv(dev);
*p_proto = ntohs(br->vlan_proto);
return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = br->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
.u.vlan_protocol = ntohs(proto),
};
int err = 0;
struct net_bridge_port *p;
struct net_bridge_vlan *vlan;
struct net_bridge_vlan_group *vg;
__be16 oldproto = br->vlan_proto;
if (br->vlan_proto == proto)
return 0;
err = switchdev_port_attr_set(br->dev, &attr, extack);
if (err && err != -EOPNOTSUPP)
return err;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
continue;
err = vlan_vid_add(p->dev, proto, vlan->vid);
if (err)
goto err_filt;
}
}
br->vlan_proto = proto;
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
/* Delete VLANs for the old proto from the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
continue;
vlan_vid_del(p->dev, oldproto, vlan->vid);
}
}
return 0;
err_filt:
attr.u.vlan_protocol = ntohs(oldproto);
switchdev_port_attr_set(br->dev, &attr, NULL);
list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
continue;
vlan_vid_del(p->dev, proto, vlan->vid);
}
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
continue;
vlan_vid_del(p->dev, proto, vlan->vid);
}
}
return err;
}
int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
if (!eth_type_vlan(htons(val)))
return -EPROTONOSUPPORT;
return __br_vlan_set_proto(br, htons(val), extack);
}
int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
switch (val) {
case 0:
case 1:
br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
break;
default:
return -EINVAL;
}
return 0;
}
int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
struct net_bridge_port *p;
/* only allow changing the option if there are no port vlans configured */
list_for_each_entry(p, &br->port_list, list) {
struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
if (vg->num_vlans)
return -EBUSY;
}
switch (val) {
case 0:
case 1:
br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
break;
default:
return -EINVAL;
}
return 0;
}
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
struct net_bridge_vlan *v;
if (vid != vg->pvid)
return false;
v = br_vlan_lookup(&vg->vlan_hash, vid);
if (v && br_vlan_should_use(v) &&
(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return true;
return false;
}
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
struct net_bridge_port *p;
u16 pvid = br->default_pvid;
/* Disable default_pvid on all ports where it is still
* configured.
*/
if (vlan_default_pvid(br_vlan_group(br), pvid)) {
if (!br_vlan_delete(br, pvid))
br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
}
list_for_each_entry(p, &br->port_list, list) {
if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
!nbp_vlan_delete(p, pvid))
br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
}
br->default_pvid = 0;
}
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
struct netlink_ext_ack *extack)
{
const struct net_bridge_vlan *pvent;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
unsigned long *changed;
bool vlchange;
u16 old_pvid;
int err = 0;
if (!pvid) {
br_vlan_disable_default_pvid(br);
return 0;
}
changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!changed)
return -ENOMEM;
old_pvid = br->default_pvid;
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
vg = br_vlan_group(br);
pvent = br_vlan_find(vg, pvid);
if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
(!pvent || !br_vlan_should_use(pvent))) {
err = br_vlan_add(br, pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY,
&vlchange, extack);
if (err)
goto out;
if (br_vlan_delete(br, old_pvid))
br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
__set_bit(0, changed);
}
list_for_each_entry(p, &br->port_list, list) {
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
vg = nbp_vlan_group(p);
if ((old_pvid &&
!vlan_default_pvid(vg, old_pvid)) ||
br_vlan_find(vg, pvid))
continue;
err = nbp_vlan_add(p, pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
&vlchange, extack);
if (err)
goto err_port;
if (nbp_vlan_delete(p, old_pvid))
br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
__set_bit(p->port_no, changed);
}
br->default_pvid = pvid;
out:
bitmap_free(changed);
return err;
err_port:
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
if (!test_bit(p->port_no, changed))
continue;
if (old_pvid) {
nbp_vlan_add(p, old_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
&vlchange, NULL);
br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
}
nbp_vlan_delete(p, pvid);
br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
}
if (test_bit(0, changed)) {
if (old_pvid) {
br_vlan_add(br, old_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY,
&vlchange, NULL);
br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
}
br_vlan_delete(br, pvid);
br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
}
goto out;
}
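/* Note on the rollback above: the "changed" bitmap records exactly which
 * entities had their pvid switched (bit 0 for the bridge itself, bit
 * p->port_no for each port), so err_port undoes only those and leaves any
 * pre-existing user configuration untouched.
 */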
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
u16 pvid = val;
int err = 0;
if (val >= VLAN_VID_MASK)
return -EINVAL;
if (pvid == br->default_pvid)
goto out;
/* Only allow default pvid change when filtering is disabled */
if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
pr_info_once("Please disable vlan filtering to change default_pvid\n");
err = -EPERM;
goto out;
}
err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
return err;
}
int br_vlan_init(struct net_bridge *br)
{
struct net_bridge_vlan_group *vg;
int ret = -ENOMEM;
vg = kzalloc(sizeof(*vg), GFP_KERNEL);
if (!vg)
goto out;
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
if (ret)
goto err_rhtbl;
ret = vlan_tunnel_init(vg);
if (ret)
goto err_tunnel_init;
INIT_LIST_HEAD(&vg->vlan_list);
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
rcu_assign_pointer(br->vlgrp, vg);
out:
return ret;
err_tunnel_init:
rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
kfree(vg);
goto out;
}
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = p->br->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
};
struct net_bridge_vlan_group *vg;
int ret = -ENOMEM;
vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
if (!vg)
goto out;
ret = switchdev_port_attr_set(p->dev, &attr, extack);
if (ret && ret != -EOPNOTSUPP)
goto err_vlan_enabled;
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
if (ret)
goto err_rhtbl;
ret = vlan_tunnel_init(vg);
if (ret)
goto err_tunnel_init;
INIT_LIST_HEAD(&vg->vlan_list);
rcu_assign_pointer(p->vlgrp, vg);
if (p->br->default_pvid) {
bool changed;
ret = nbp_vlan_add(p, p->br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
&changed, extack);
if (ret)
goto err_vlan_add;
br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
}
out:
return ret;
err_vlan_add:
RCU_INIT_POINTER(p->vlgrp, NULL);
synchronize_rcu();
vlan_tunnel_deinit(vg);
err_tunnel_init:
rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
kfree(vg);
goto out;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
* changed must be true only if the vlan was created or updated
*/
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
bool *changed, struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *vlan;
int ret;
ASSERT_RTNL();
*changed = false;
vlan = br_vlan_find(nbp_vlan_group(port), vid);
if (vlan) {
bool would_change = __vlan_flags_would_change(vlan, flags);
if (would_change) {
/* Pass the flags to the hardware bridge */
ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
true, extack);
if (ret && ret != -EOPNOTSUPP)
return ret;
}
__vlan_flags_commit(vlan, flags);
*changed = would_change;
return 0;
}
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return -ENOMEM;
vlan->vid = vid;
vlan->port = port;
ret = __vlan_add(vlan, flags, extack);
if (ret)
kfree(vlan);
else
*changed = true;
return ret;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
struct net_bridge_vlan *v;
ASSERT_RTNL();
v = br_vlan_find(nbp_vlan_group(port), vid);
if (!v)
return -ENOENT;
br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
br_fdb_delete_by_port(port->br, port, vid, 0);
return __vlan_del(v);
}
void nbp_vlan_flush(struct net_bridge_port *port)
{
struct net_bridge_vlan_group *vg;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL);
synchronize_rcu();
__vlan_group_free(vg);
}
void br_vlan_get_stats(const struct net_bridge_vlan *v,
struct pcpu_sw_netstats *stats)
{
int i;
memset(stats, 0, sizeof(*stats));
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, txpackets, txbytes;
struct pcpu_sw_netstats *cpu_stats;
unsigned int start;
cpu_stats = per_cpu_ptr(v->stats, i);
do {
start = u64_stats_fetch_begin(&cpu_stats->syncp);
rxpackets = u64_stats_read(&cpu_stats->rx_packets);
rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
txbytes = u64_stats_read(&cpu_stats->tx_bytes);
txpackets = u64_stats_read(&cpu_stats->tx_packets);
} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
u64_stats_add(&stats->rx_packets, rxpackets);
u64_stats_add(&stats->rx_bytes, rxbytes);
u64_stats_add(&stats->tx_bytes, txbytes);
u64_stats_add(&stats->tx_packets, txpackets);
}
}
int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
ASSERT_RTNL();
p = br_port_get_check_rtnl(dev);
if (p)
vg = nbp_vlan_group(p);
else if (netif_is_bridge_master(dev))
vg = br_vlan_group(netdev_priv(dev));
else
return -EINVAL;
*p_pvid = br_get_pvid(vg);
return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
p = br_port_get_check_rcu(dev);
if (p)
vg = nbp_vlan_group_rcu(p);
else if (netif_is_bridge_master(dev))
vg = br_vlan_group_rcu(netdev_priv(dev));
else
return -EINVAL;
*p_pvid = br_get_pvid(vg);
return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct net_bridge_vlan_group *vg;
int idx = ctx->num_vlans - 1;
u16 vid;
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return;
vg = br_vlan_group(br);
if (idx >= 0 &&
ctx->vlan[idx].proto == br->vlan_proto) {
vid = ctx->vlan[idx].id;
} else {
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
vid = br_get_pvid(vg);
}
path->bridge.vlan_id = vid;
path->bridge.vlan_proto = br->vlan_proto;
}
int br_vlan_fill_forward_path_mode(struct net_bridge *br,
struct net_bridge_port *dst,
struct net_device_path *path)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return 0;
vg = nbp_vlan_group_rcu(dst);
v = br_vlan_find(vg, path->bridge.vlan_id);
if (!v || !br_vlan_should_use(v))
return -EINVAL;
if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return 0;
if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
else
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
return 0;
}
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct net_bridge_port *p;
ASSERT_RTNL();
p = br_port_get_check_rtnl(dev);
if (p)
vg = nbp_vlan_group(p);
else if (netif_is_bridge_master(dev))
vg = br_vlan_group(netdev_priv(dev));
else
return -EINVAL;
v = br_vlan_find(vg, vid);
if (!v)
return -ENOENT;
p_vinfo->vid = vid;
p_vinfo->flags = v->flags;
if (vid == br_get_pvid(vg))
p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);
int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct net_bridge_port *p;
p = br_port_get_check_rcu(dev);
if (p)
vg = nbp_vlan_group_rcu(p);
else if (netif_is_bridge_master(dev))
vg = br_vlan_group_rcu(netdev_priv(dev));
else
return -EINVAL;
v = br_vlan_find(vg, vid);
if (!v)
return -ENOENT;
p_vinfo->vid = vid;
p_vinfo->flags = v->flags;
if (vid == br_get_pvid(vg))
p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
return is_vlan_dev(dev) &&
!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}
static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
__always_unused struct netdev_nested_priv *priv)
{
return br_vlan_is_bind_vlan_dev(dev);
}
static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
int found;
rcu_read_lock();
found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
NULL);
rcu_read_unlock();
return !!found;
}
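/* Illustrative note, not part of the original file:
 * netdev_walk_all_upper_dev_rcu() stops walking as soon as the callback
 * returns non-zero, so the helpers above and below use a non-zero return to
 * mean "match found" and report it back to the caller.
 */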
struct br_vlan_bind_walk_data {
u16 vid;
struct net_device *result;
};
static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
struct netdev_nested_priv *priv)
{
struct br_vlan_bind_walk_data *data = priv->data;
int found = 0;
if (br_vlan_is_bind_vlan_dev(dev) &&
vlan_dev_priv(dev)->vlan_id == data->vid) {
data->result = dev;
found = 1;
}
return found;
}
static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
struct br_vlan_bind_walk_data data = {
.vid = vid,
};
struct netdev_nested_priv priv = {
.data = (void *)&data,
};
rcu_read_lock();
netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
&priv);
rcu_read_unlock();
return data.result;
}
static bool br_vlan_is_dev_up(const struct net_device *dev)
{
return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}
static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
struct net_device *vlan_dev)
{
u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
bool has_carrier = false;
if (!netif_carrier_ok(br->dev)) {
netif_carrier_off(vlan_dev);
return;
}
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
has_carrier = true;
break;
}
}
if (has_carrier)
netif_carrier_on(vlan_dev);
else
netif_carrier_off(vlan_dev);
}
static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
struct net_bridge_vlan *vlan;
struct net_device *vlan_dev;
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
vlan->vid);
if (vlan_dev) {
if (br_vlan_is_dev_up(p->dev)) {
if (netif_carrier_ok(p->br->dev))
netif_carrier_on(vlan_dev);
} else {
br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}
}
}
}
static void br_vlan_upper_change(struct net_device *dev,
struct net_device *upper_dev,
bool linking)
{
struct net_bridge *br = netdev_priv(dev);
if (!br_vlan_is_bind_vlan_dev(upper_dev))
return;
if (linking) {
br_vlan_set_vlan_dev_state(br, upper_dev);
br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
} else {
br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
br_vlan_has_upper_bind_vlan_dev(dev));
}
}
struct br_vlan_link_state_walk_data {
struct net_bridge *br;
};
static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
struct netdev_nested_priv *priv)
{
struct br_vlan_link_state_walk_data *data = priv->data;
if (br_vlan_is_bind_vlan_dev(vlan_dev))
br_vlan_set_vlan_dev_state(data->br, vlan_dev);
return 0;
}
static void br_vlan_link_state_change(struct net_device *dev,
struct net_bridge *br)
{
struct br_vlan_link_state_walk_data data = {
.br = br
};
struct netdev_nested_priv priv = {
.data = (void *)&data,
};
rcu_read_lock();
netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
&priv);
rcu_read_unlock();
}
/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
struct net_device *vlan_dev;
if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
return;
vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
if (vlan_dev)
br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}
/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
struct net_bridge *br = netdev_priv(dev);
int vlcmd = 0, ret = 0;
bool changed = false;
switch (event) {
case NETDEV_REGISTER:
ret = br_vlan_add(br, br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
vlcmd = RTM_NEWVLAN;
break;
case NETDEV_UNREGISTER:
changed = !br_vlan_delete(br, br->default_pvid);
vlcmd = RTM_DELVLAN;
break;
case NETDEV_CHANGEUPPER:
info = ptr;
br_vlan_upper_change(dev, info->upper_dev, info->linking);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
break;
br_vlan_link_state_change(dev, br);
break;
}
if (changed)
br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
return ret;
}
/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
return;
switch (event) {
case NETDEV_CHANGE:
case NETDEV_DOWN:
case NETDEV_UP:
br_vlan_set_all_vlan_dev_state(p);
break;
}
}
static bool br_vlan_stats_fill(struct sk_buff *skb,
const struct net_bridge_vlan *v)
{
struct pcpu_sw_netstats stats;
struct nlattr *nest;
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
if (!nest)
return false;
br_vlan_get_stats(v, &stats);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
u64_stats_read(&stats.rx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
u64_stats_read(&stats.rx_packets),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
u64_stats_read(&stats.tx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
u64_stats_read(&stats.tx_packets),
BRIDGE_VLANDB_STATS_PAD))
goto out_err;
nla_nest_end(skb, nest);
return true;
out_err:
nla_nest_cancel(skb, nest);
return false;
}
/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
const struct net_bridge_vlan *v_opts,
const struct net_bridge_port *p,
u16 flags,
bool dump_stats)
{
struct bridge_vlan_info info;
struct nlattr *nest;
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
if (!nest)
return false;
memset(&info, 0, sizeof(info));
info.vid = vid;
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (flags & BRIDGE_VLAN_INFO_PVID)
info.flags |= BRIDGE_VLAN_INFO_PVID;
if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
goto out_err;
if (vid_range && vid < vid_range &&
!(flags & BRIDGE_VLAN_INFO_PVID) &&
nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
goto out_err;
if (v_opts) {
if (!br_vlan_opts_fill(skb, v_opts, p))
goto out_err;
if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
goto out_err;
}
nla_nest_end(skb, nest);
return true;
out_err:
nla_nest_cancel(skb, nest);
return false;
}
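/* Netlink layout of one entry emitted by br_vlan_fill_vids() above (sizes
 * and accounting follow rtnl_vlan_nlmsg_size() below):
 *
 *   BRIDGE_VLANDB_ENTRY (nested)
 *     BRIDGE_VLANDB_ENTRY_INFO   struct bridge_vlan_info { vid, flags }
 *     BRIDGE_VLANDB_ENTRY_RANGE  u16, only for vid ranges (non-pvid)
 *     per-vlan options           via br_vlan_opts_fill(), when present
 *     BRIDGE_VLANDB_ENTRY_STATS  nested, only when stats are requested
 */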
static size_t rtnl_vlan_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
+ br_vlan_opts_nl_size(); /* bridge vlan options */
}
void br_vlan_notify(const struct net_bridge *br,
const struct net_bridge_port *p,
u16 vid, u16 vid_range,
int cmd)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v = NULL;
struct br_vlan_msg *bvm;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err = -ENOBUFS;
struct net *net;
u16 flags = 0;
int ifindex;
/* right now notifications are done only with rtnl held */
ASSERT_RTNL();
if (p) {
ifindex = p->dev->ifindex;
vg = nbp_vlan_group(p);
net = dev_net(p->dev);
} else {
ifindex = br->dev->ifindex;
vg = br_vlan_group(br);
net = dev_net(br->dev);
}
skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
if (!skb)
goto out_err;
err = -EMSGSIZE;
nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
if (!nlh)
goto out_err;
bvm = nlmsg_data(nlh);
memset(bvm, 0, sizeof(*bvm));
bvm->family = AF_BRIDGE;
bvm->ifindex = ifindex;
switch (cmd) {
case RTM_NEWVLAN:
/* need to find the vlan due to flags/options */
v = br_vlan_find(vg, vid);
if (!v || !br_vlan_should_use(v))
goto out_kfree;
flags = v->flags;
if (br_get_pvid(vg) == v->vid)
flags |= BRIDGE_VLAN_INFO_PVID;
break;
case RTM_DELVLAN:
break;
default:
goto out_kfree;
}
if (!br_vlan_fill_vids(skb, vid, vid_range, v, p, flags, false))
goto out_err;
nlmsg_end(skb, nlh);
rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
return;
out_err:
rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
kfree_skb(skb);
}
/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end)
{
return v_curr->vid - range_end->vid == 1 &&
range_end->flags == v_curr->flags &&
br_vlan_opts_eq_range(v_curr, range_end);
}
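/* Range compression example (hypothetical vids): vlans 10, 11 and 12 with
 * identical flags and options are reported as a single 10-12 entry because
 * each successive pair satisfies the check above (vid delta of 1, equal
 * flags, equal options). A pvid or a vlan with different options breaks the
 * range and starts a new one.
 */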
static int br_vlan_dump_dev(const struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb,
u32 dump_flags)
{
struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
struct net_bridge_vlan_group *vg;
int idx = 0, s_idx = cb->args[1];
struct nlmsghdr *nlh = NULL;
struct net_bridge_port *p;
struct br_vlan_msg *bvm;
struct net_bridge *br;
int err = 0;
u16 pvid;
if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
return -EINVAL;
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group_rcu(br);
p = NULL;
} else {
/* global options are dumped only for bridge devices */
if (dump_global)
return 0;
p = br_port_get_rcu(dev);
if (WARN_ON(!p))
return -EINVAL;
vg = nbp_vlan_group_rcu(p);
br = p->br;
}
if (!vg)
return 0;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
bvm = nlmsg_data(nlh);
memset(bvm, 0, sizeof(*bvm));
bvm->family = PF_BRIDGE;
bvm->ifindex = dev->ifindex;
pvid = br_get_pvid(vg);
/* idx must stay at range's beginning until it is filled in */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (!dump_global && !br_vlan_should_use(v))
continue;
if (idx < s_idx) {
idx++;
continue;
}
if (!range_start) {
range_start = v;
range_end = v;
continue;
}
if (dump_global) {
if (br_vlan_global_opts_can_enter_range(v, range_end))
goto update_end;
if (!br_vlan_global_opts_fill(skb, range_start->vid,
range_end->vid,
range_start)) {
err = -EMSGSIZE;
break;
}
/* advance number of filled vlans */
idx += range_end->vid - range_start->vid + 1;
range_start = v;
} else if (dump_stats || v->vid == pvid ||
!br_vlan_can_enter_range(v, range_end)) {
u16 vlan_flags = br_vlan_flags(range_start, pvid);
if (!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
p, vlan_flags, dump_stats)) {
err = -EMSGSIZE;
break;
}
/* advance number of filled vlans */
idx += range_end->vid - range_start->vid + 1;
range_start = v;
}
update_end:
range_end = v;
}
/* err will be 0 and range_start will be set in 3 cases here:
* - first vlan (range_start == range_end)
* - last vlan (range_start == range_end, not in range)
* - last vlan range (range_start != range_end, in range)
*/
if (!err && range_start) {
if (dump_global &&
!br_vlan_global_opts_fill(skb, range_start->vid,
range_end->vid, range_start))
err = -EMSGSIZE;
else if (!dump_global &&
!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
p, br_vlan_flags(range_start, pvid),
dump_stats))
err = -EMSGSIZE;
}
cb->args[1] = err ? idx : 0;
nlmsg_end(skb, nlh);
return err;
}
static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
int idx = 0, err = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
struct br_vlan_msg *bvm;
struct net_device *dev;
u32 dump_flags = 0;
err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
br_vlan_db_dump_pol, cb->extack);
if (err < 0)
return err;
bvm = nlmsg_data(cb->nlh);
if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
rcu_read_lock();
if (bvm->ifindex) {
dev = dev_get_by_index_rcu(net, bvm->ifindex);
if (!dev) {
err = -ENODEV;
goto out_err;
}
err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
/* if the dump completed without an error we return 0 here */
if (err != -EMSGSIZE)
goto out_err;
} else {
for_each_netdev_rcu(net, dev) {
if (idx < s_idx)
goto skip;
err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
if (err == -EMSGSIZE)
break;
skip:
idx++;
}
}
cb->args[0] = idx;
rcu_read_unlock();
return skb->len;
out_err:
rcu_read_unlock();
return err;
}
static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
[BRIDGE_VLANDB_ENTRY_INFO] =
NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
[BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS] = { .type = NLA_REJECT },
[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int br_vlan_rtm_process_one(struct net_device *dev,
const struct nlattr *attr,
int cmd, struct netlink_ext_ack *extack)
{
struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
bool changed = false, skip_processing = false;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
int err = 0, cmdmap = 0;
struct net_bridge *br;
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group(br);
} else {
p = br_port_get_rtnl(dev);
if (WARN_ON(!p))
return -ENODEV;
br = p->br;
vg = nbp_vlan_group(p);
}
if (WARN_ON(!vg))
return -ENODEV;
err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
br_vlan_db_policy, extack);
if (err)
return err;
if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
return -EINVAL;
}
memset(&vrange_end, 0, sizeof(vrange_end));
vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
BRIDGE_VLAN_INFO_RANGE_END)) {
NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
return -EINVAL;
}
if (!br_vlan_valid_id(vinfo->vid, extack))
return -EINVAL;
if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
/* validate user-provided flags without RANGE_BEGIN */
vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
/* vinfo_last is the range start, vinfo the range end */
vinfo_last = vinfo;
vinfo = &vrange_end;
if (!br_vlan_valid_id(vinfo->vid, extack) ||
!br_vlan_valid_range(vinfo, vinfo_last, extack))
return -EINVAL;
}
switch (cmd) {
case RTM_NEWVLAN:
cmdmap = RTM_SETLINK;
skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
break;
case RTM_DELVLAN:
cmdmap = RTM_DELLINK;
break;
}
if (!skip_processing) {
struct bridge_vlan_info *tmp_last = vinfo_last;
/* br_process_vlan_info may overwrite vinfo_last */
err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
&changed, extack);
/* notify first if anything changed */
if (changed)
br_ifinfo_notify(cmdmap, br, p);
if (err)
return err;
}
/* deal with options */
if (cmd == RTM_NEWVLAN) {
struct net_bridge_vlan *range_start, *range_end;
if (vinfo_last) {
range_start = br_vlan_find(vg, vinfo_last->vid);
range_end = br_vlan_find(vg, vinfo->vid);
} else {
range_start = br_vlan_find(vg, vinfo->vid);
range_end = range_start;
}
err = br_vlan_process_options(br, p, range_start, range_end,
tb, extack);
}
return err;
}
static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct br_vlan_msg *bvm;
struct net_device *dev;
struct nlattr *attr;
int err, vlans = 0;
int rem;
/* this should validate the header and check for remaining bytes */
err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
extack);
if (err < 0)
return err;
bvm = nlmsg_data(nlh);
dev = __dev_get_by_index(net, bvm->ifindex);
if (!dev)
return -ENODEV;
if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
return -EINVAL;
}
nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
switch (nla_type(attr)) {
case BRIDGE_VLANDB_ENTRY:
err = br_vlan_rtm_process_one(dev, attr,
nlh->nlmsg_type,
extack);
break;
case BRIDGE_VLANDB_GLOBAL_OPTIONS:
err = br_vlan_rtm_process_global_options(dev, attr,
nlh->nlmsg_type,
extack);
break;
default:
continue;
}
vlans++;
if (err)
break;
}
if (!vlans) {
NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
err = -EINVAL;
}
return err;
}
void br_vlan_rtnl_init(void)
{
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
br_vlan_rtm_dump, 0);
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
br_vlan_rtm_process, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
br_vlan_rtm_process, NULL, 0);
}
void br_vlan_rtnl_uninit(void)
{
rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}
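/* The three handlers registered above back the bridge per-vlan database
 * (BRIDGE_VLANDB_*) over PF_BRIDGE. Sufficiently recent iproute2 uses this
 * family for per-vlan and global vlan options as well as detailed dumps;
 * an illustrative (version-dependent) invocation would be something like:
 *
 *   bridge vlan global set dev br0 vid 1 mcast_snooping 1
 *
 * which reaches br_vlan_rtm_process() as an RTM_NEWVLAN request carrying
 * BRIDGE_VLANDB_GLOBAL_OPTIONS.
 */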
| linux-master | net/bridge/br_vlan.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/switchdev.h>
#include "br_private_mrp.h"
static enum br_mrp_hw_support
br_mrp_switchdev_port_obj(struct net_bridge *br,
const struct switchdev_obj *obj, bool add)
{
int err;
if (add)
err = switchdev_port_obj_add(br->dev, obj, NULL);
else
err = switchdev_port_obj_del(br->dev, obj);
	/* In case of success just return and notify the SW that it doesn't
	 * need to do anything
	 */
if (!err)
return BR_MRP_HW;
if (err != -EOPNOTSUPP)
return BR_MRP_NONE;
/* Continue with SW backup */
return BR_MRP_SW;
}
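/* Return value summary for the helper above:
 *   BR_MRP_HW   - the object was accepted, HW runs this part of MRP
 *   BR_MRP_SW   - the driver returned -EOPNOTSUPP, fall back to SW
 *   BR_MRP_NONE - hard error, neither HW offload nor SW backup proceeds
 */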
int br_mrp_switchdev_add(struct net_bridge *br, struct br_mrp *mrp)
{
struct switchdev_obj_mrp mrp_obj = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_MRP,
.p_port = rtnl_dereference(mrp->p_port)->dev,
.s_port = rtnl_dereference(mrp->s_port)->dev,
.ring_id = mrp->ring_id,
.prio = mrp->prio,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_obj_add(br->dev, &mrp_obj.obj, NULL);
}
int br_mrp_switchdev_del(struct net_bridge *br, struct br_mrp *mrp)
{
struct switchdev_obj_mrp mrp_obj = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_MRP,
.p_port = NULL,
.s_port = NULL,
.ring_id = mrp->ring_id,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_obj_del(br->dev, &mrp_obj.obj);
}
enum br_mrp_hw_support
br_mrp_switchdev_set_ring_role(struct net_bridge *br, struct br_mrp *mrp,
enum br_mrp_ring_role_type role)
{
struct switchdev_obj_ring_role_mrp mrp_role = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
.ring_role = role,
.ring_id = mrp->ring_id,
.sw_backup = false,
};
enum br_mrp_hw_support support;
int err;
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return BR_MRP_SW;
support = br_mrp_switchdev_port_obj(br, &mrp_role.obj,
role != BR_MRP_RING_ROLE_DISABLED);
if (support != BR_MRP_SW)
return support;
	/* If the driver can't run the protocol completely in HW, then try
	 * again to configure the HW so that the SW can run the protocol.
	 */
mrp_role.sw_backup = true;
if (role != BR_MRP_RING_ROLE_DISABLED)
err = switchdev_port_obj_add(br->dev, &mrp_role.obj, NULL);
else
err = switchdev_port_obj_del(br->dev, &mrp_role.obj);
if (!err)
return BR_MRP_SW;
return BR_MRP_NONE;
}
enum br_mrp_hw_support
br_mrp_switchdev_send_ring_test(struct net_bridge *br, struct br_mrp *mrp,
u32 interval, u8 max_miss, u32 period,
bool monitor)
{
struct switchdev_obj_ring_test_mrp test = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_RING_TEST_MRP,
.interval = interval,
.max_miss = max_miss,
.ring_id = mrp->ring_id,
.period = period,
.monitor = monitor,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return BR_MRP_SW;
return br_mrp_switchdev_port_obj(br, &test.obj, interval != 0);
}
int br_mrp_switchdev_set_ring_state(struct net_bridge *br,
struct br_mrp *mrp,
enum br_mrp_ring_state_type state)
{
struct switchdev_obj_ring_state_mrp mrp_state = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_RING_STATE_MRP,
.ring_state = state,
.ring_id = mrp->ring_id,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_obj_add(br->dev, &mrp_state.obj, NULL);
}
enum br_mrp_hw_support
br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
u16 in_id, u32 ring_id,
enum br_mrp_in_role_type role)
{
struct switchdev_obj_in_role_mrp mrp_role = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
.in_role = role,
.in_id = mrp->in_id,
.ring_id = mrp->ring_id,
.i_port = rtnl_dereference(mrp->i_port)->dev,
.sw_backup = false,
};
enum br_mrp_hw_support support;
int err;
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return BR_MRP_SW;
support = br_mrp_switchdev_port_obj(br, &mrp_role.obj,
role != BR_MRP_IN_ROLE_DISABLED);
	if (support != BR_MRP_SW)
return support;
	/* If the driver can't run the protocol completely in HW, then try
	 * again to configure the HW so that the SW can run the protocol.
	 */
mrp_role.sw_backup = true;
if (role != BR_MRP_IN_ROLE_DISABLED)
err = switchdev_port_obj_add(br->dev, &mrp_role.obj, NULL);
else
err = switchdev_port_obj_del(br->dev, &mrp_role.obj);
if (!err)
return BR_MRP_SW;
return BR_MRP_NONE;
}
int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp,
enum br_mrp_in_state_type state)
{
struct switchdev_obj_in_state_mrp mrp_state = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_IN_STATE_MRP,
.in_state = state,
.in_id = mrp->in_id,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_obj_add(br->dev, &mrp_state.obj, NULL);
}
enum br_mrp_hw_support
br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
u32 interval, u8 max_miss, u32 period)
{
struct switchdev_obj_in_test_mrp test = {
.obj.orig_dev = br->dev,
.obj.id = SWITCHDEV_OBJ_ID_IN_TEST_MRP,
.interval = interval,
.max_miss = max_miss,
.in_id = mrp->in_id,
.period = period,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return BR_MRP_SW;
return br_mrp_switchdev_port_obj(br, &test.obj, interval != 0);
}
int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
.u.stp_state = state,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_attr_set(p->dev, &attr, NULL);
}
int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
enum br_mrp_port_role_type role)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
.u.mrp_port_role = role,
};
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV))
return 0;
return switchdev_port_attr_set(p->dev, &attr, NULL);
}
| linux-master | net/bridge/br_mrp_switchdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bridge per vlan tunnel port dst_metadata handling code
*
* Authors:
* Roopa Prabhu <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>
#include <net/dst_metadata.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct net_bridge_vlan *vle = ptr;
__be64 tunid = *(__be64 *)arg->key;
return vle->tinfo.tunnel_id != tunid;
}
static const struct rhashtable_params br_vlan_tunnel_rht_params = {
.head_offset = offsetof(struct net_bridge_vlan, tnode),
.key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id),
.key_len = sizeof(__be64),
.nelem_hint = 3,
.obj_cmpfn = br_vlan_tunid_cmp,
.automatic_shrinking = true,
};
static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
__be64 tunnel_id)
{
return rhashtable_lookup_fast(tbl, &tunnel_id,
br_vlan_tunnel_rht_params);
}
static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
{
struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
dst_release(&tdst->dst);
}
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
return;
rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
vlan_tunnel_info_release(vlan);
}
static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan, u32 tun_id)
{
struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
int err;
if (metadata)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
key, 0);
if (!metadata)
return -EINVAL;
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
WRITE_ONCE(vlan->tinfo.tunnel_id, key);
err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
if (err)
goto out;
return 0;
out:
vlan_tunnel_info_release(vlan);
return err;
}
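/* Per-vlan tunnel mapping sketch (hypothetical values): mapping vid 10 to
 * VNI 1000 on a vxlan bridge port stores tunnel_id 1000 in the vlan's
 * tinfo and indexes it in vg->tunnel_hash, so ingress lookups by tunnel id
 * resolve in one hash lookup. From userspace this is typically driven by
 * something like (illustrative iproute2 syntax):
 *
 *   bridge vlan add dev vxlan0 vid 10 tunnel_info id 1000
 */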
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_tunnel_info_add(const struct net_bridge_port *port, u16 vid,
u32 tun_id)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
vlan = br_vlan_find(vg, vid);
if (!vlan)
return -EINVAL;
return __vlan_tunnel_info_add(vg, vlan, tun_id);
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_tunnel_info_delete(const struct net_bridge_port *port, u16 vid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
v = br_vlan_find(vg, vid);
if (!v)
return -ENOENT;
vlan_tunnel_info_del(vg, v);
return 0;
}
static void __vlan_tunnel_info_flush(struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vlan, *tmp;
list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
vlan_tunnel_info_del(vg, vlan);
}
void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port)
{
struct net_bridge_vlan_group *vg;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
__vlan_tunnel_info_flush(vg);
}
int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
{
return rhashtable_init(&vg->tunnel_hash, &br_vlan_tunnel_rht_params);
}
void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg)
{
rhashtable_destroy(&vg->tunnel_hash);
}
void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_port *p,
struct net_bridge_vlan_group *vg)
{
struct ip_tunnel_info *tinfo = skb_tunnel_info(skb);
struct net_bridge_vlan *vlan;
if (!vg || !tinfo)
return;
/* if already tagged, ignore */
if (skb_vlan_tagged(skb))
return;
/* lookup vid, given tunnel id */
vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id);
if (!vlan)
return;
skb_dst_drop(skb);
__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
}
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
struct metadata_dst *tunnel_dst;
__be64 tunnel_id;
int err;
if (!vlan)
return 0;
tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
return 0;
skb_dst_drop(skb);
err = skb_vlan_pop(skb);
if (err)
return err;
if (BR_INPUT_SKB_CB(skb)->backup_nhid) {
tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
tunnel_id, 0);
if (!tunnel_dst)
return -ENOMEM;
tunnel_dst->u.tun_info.mode |= IP_TUNNEL_INFO_TX |
IP_TUNNEL_INFO_BRIDGE;
tunnel_dst->u.tun_info.key.nhid =
BR_INPUT_SKB_CB(skb)->backup_nhid;
skb_dst_set(skb, &tunnel_dst->dst);
return 0;
}
tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
skb_dst_set(skb, &tunnel_dst->dst);
return 0;
}
| linux-master | net/bridge/br_vlan_tunnel.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>
#include "br_private.h"
static struct static_key_false br_switchdev_tx_fwd_offload;
static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
return false;
return (p->flags & BR_TX_FWD_OFFLOAD) &&
(p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}
bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
return false;
return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}
void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}
/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
struct sk_buff *skb)
{
if (nbp_switchdev_can_offload_tx_fwd(p, skb))
BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}
/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, to avoid further cloning to
* other ports in the same hwdom by making nbp_switchdev_allowed_egress()
* return false.
*/
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
struct sk_buff *skb)
{
if (nbp_switchdev_can_offload_tx_fwd(p, skb))
set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
struct sk_buff *skb)
{
if (p->hwdom)
BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);
return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}
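/* Egress filtering sketch: a frame that entered through a port in hwdom 1
 * and was already forwarded in HW within that domain (offload_fwd_mark set,
 * or hwdom 1 present in fwd_hwdoms) is not cloned again to other ports of
 * hwdom 1, but is still flooded in SW to ports of other hwdoms and to
 * non-offloaded ports (hwdom 0).
 */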
/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
unsigned long mask,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
};
struct switchdev_notifier_port_attr_info info = {
.attr = &attr,
};
int err;
mask &= BR_PORT_FLAGS_HW_OFFLOAD;
if (!mask)
return 0;
attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
attr.u.brport_flags.val = flags;
attr.u.brport_flags.mask = mask;
/* We run from atomic context here */
err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
&info.info, extack);
err = notifier_to_errno(err);
if (err == -EOPNOTSUPP)
return 0;
if (err) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"bridge flag offload is not supported");
return -EOPNOTSUPP;
}
attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
attr.flags = SWITCHDEV_F_DEFER;
err = switchdev_port_attr_set(p->dev, &attr, extack);
if (err) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"error setting offload flag on port");
return err;
}
return 0;
}
static void br_switchdev_fdb_populate(struct net_bridge *br,
struct switchdev_notifier_fdb_info *item,
const struct net_bridge_fdb_entry *fdb,
const void *ctx)
{
const struct net_bridge_port *p = READ_ONCE(fdb->dst);
item->addr = fdb->key.addr.addr;
item->vid = fdb->key.vlan_id;
item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
item->locked = false;
item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
item->info.ctx = ctx;
}
void
br_switchdev_fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, int type)
{
struct switchdev_notifier_fdb_info item;
if (test_bit(BR_FDB_LOCKED, &fdb->flags))
return;
/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
* ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
* equivalent to 'bridge fdb add ... master dynamic (sticky)'.
* Drivers don't know how to deal with these, so don't notify them to
* avoid confusing them.
*/
if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
!test_bit(BR_FDB_STATIC, &fdb->flags) &&
!test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
return;
br_switchdev_fdb_populate(br, &item, fdb, NULL);
switch (type) {
case RTM_DELNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
item.info.dev, &item.info, NULL);
break;
case RTM_NEWNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
item.info.dev, &item.info, NULL);
break;
}
}
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
bool changed, struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan v = {
.obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.flags = flags,
.vid = vid,
.changed = changed,
};
return switchdev_port_obj_add(dev, &v.obj, extack);
}
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
struct switchdev_obj_port_vlan v = {
.obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.vid = vid,
};
return switchdev_port_obj_del(dev, &v.obj);
}
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
struct net_bridge *br = joining->br;
struct net_bridge_port *p;
int hwdom;
/* joining is yet to be added to the port list. */
list_for_each_entry(p, &br->port_list, list) {
if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
joining->hwdom = p->hwdom;
return 0;
}
}
hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
if (hwdom >= BR_HWDOM_MAX)
return -EBUSY;
set_bit(hwdom, &br->busy_hwdoms);
joining->hwdom = hwdom;
return 0;
}
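/* Hardware domain assignment example (hypothetical topology): if swp1 and
 * swp2 share the same port parent ID they get the same hwdom, while a port
 * from a second ASIC gets the next free domain starting from 1; hwdom 0 is
 * reserved to mean "not offloaded". Up to BR_HWDOM_MAX - 1 distinct
 * switches can therefore be enslaved to one bridge.
 */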
static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
struct net_bridge *br = leaving->br;
struct net_bridge_port *p;
/* leaving is no longer in the port list. */
list_for_each_entry(p, &br->port_list, list) {
if (p->hwdom == leaving->hwdom)
return;
}
clear_bit(leaving->hwdom, &br->busy_hwdoms);
}
static int nbp_switchdev_add(struct net_bridge_port *p,
struct netdev_phys_item_id ppid,
bool tx_fwd_offload,
struct netlink_ext_ack *extack)
{
int err;
if (p->offload_count) {
		/* Prevent unsupported configurations, such as a bridge port
		 * that is a bonding interface whose member ports come from
		 * different hardware switches.
		 */
if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
NL_SET_ERR_MSG_MOD(extack,
"Same bridge port cannot be offloaded by two physical switches");
return -EBUSY;
}
/* Tolerate drivers that call switchdev_bridge_port_offload()
* more than once for the same bridge port, such as when the
* bridge port is an offloaded bonding/team interface.
*/
p->offload_count++;
return 0;
}
p->ppid = ppid;
p->offload_count = 1;
err = nbp_switchdev_hwdom_set(p);
if (err)
return err;
if (tx_fwd_offload) {
p->flags |= BR_TX_FWD_OFFLOAD;
static_branch_inc(&br_switchdev_tx_fwd_offload);
}
return 0;
}
static void nbp_switchdev_del(struct net_bridge_port *p)
{
if (WARN_ON(!p->offload_count))
return;
p->offload_count--;
if (p->offload_count)
return;
if (p->hwdom)
nbp_switchdev_hwdom_put(p);
if (p->flags & BR_TX_FWD_OFFLOAD) {
p->flags &= ~BR_TX_FWD_OFFLOAD;
static_branch_dec(&br_switchdev_tx_fwd_offload);
}
}
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
const struct net_bridge_fdb_entry *fdb,
unsigned long action, const void *ctx)
{
struct switchdev_notifier_fdb_info item;
int err;
br_switchdev_fdb_populate(br, &item, fdb, ctx);
err = nb->notifier_call(nb, action, &item);
return notifier_to_errno(err);
}
static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
bool adding, struct notifier_block *nb)
{
struct net_bridge_fdb_entry *fdb;
struct net_bridge *br;
unsigned long action;
int err = 0;
if (!nb)
return 0;
if (!netif_is_bridge_master(br_dev))
return -EINVAL;
br = netdev_priv(br_dev);
if (adding)
action = SWITCHDEV_FDB_ADD_TO_DEVICE;
else
action = SWITCHDEV_FDB_DEL_TO_DEVICE;
rcu_read_lock();
hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
if (err)
break;
}
rcu_read_unlock();
return err;
}
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
const void *ctx,
struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_port_attr_info attr_info = {
.info = {
.dev = br_dev,
.extack = extack,
.ctx = ctx,
},
};
struct net_bridge *br = netdev_priv(br_dev);
struct net_bridge_vlan_group *vg;
struct switchdev_attr attr;
struct net_bridge_vlan *v;
int err;
attr_info.attr = &attr;
attr.orig_dev = br_dev;
vg = br_vlan_group(br);
if (!vg)
return 0;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (v->msti) {
attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
attr.u.vlan_msti.vid = v->vid;
attr.u.vlan_msti.msti = v->msti;
err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
&attr_info);
err = notifier_to_errno(err);
if (err)
return err;
}
}
return 0;
}
static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
struct net_device *dev,
struct switchdev_obj_port_vlan *vlan,
const void *ctx, unsigned long action,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_port_obj_info obj_info = {
.info = {
.dev = dev,
.extack = extack,
.ctx = ctx,
},
.obj = &vlan->obj,
};
int err;
err = nb->notifier_call(nb, action, &obj_info);
return notifier_to_errno(err);
}
static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
struct net_device *dev,
struct net_bridge_vlan_group *vg,
const void *ctx, unsigned long action,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *v;
int err = 0;
u16 pvid;
if (!vg)
return 0;
pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
struct switchdev_obj_port_vlan vlan = {
.obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.flags = br_vlan_flags(v, pvid),
.vid = v->vid,
};
if (!br_vlan_should_use(v))
continue;
err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
action, extack);
if (err)
return err;
}
return 0;
}
static int br_switchdev_vlan_replay(struct net_device *br_dev,
const void *ctx, bool adding,
struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(br_dev);
struct net_bridge_port *p;
unsigned long action;
int err;
ASSERT_RTNL();
if (!nb)
return 0;
if (!netif_is_bridge_master(br_dev))
return -EINVAL;
if (adding)
action = SWITCHDEV_PORT_OBJ_ADD;
else
action = SWITCHDEV_PORT_OBJ_DEL;
err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
ctx, action, extack);
if (err)
return err;
list_for_each_entry(p, &br->port_list, list) {
struct net_device *dev = p->dev;
err = br_switchdev_vlan_replay_group(nb, dev,
nbp_vlan_group(p),
ctx, action, extack);
if (err)
return err;
}
if (adding) {
err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
if (err)
return err;
}
return 0;
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
struct net_bridge_port *port;
struct br_ip ip;
};
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
struct br_switchdev_mdb_complete_info *data = priv;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port *port = data->port;
struct net_bridge *br = port->br;
if (err)
goto err;
spin_lock_bh(&br->multicast_lock);
mp = br_mdb_ip_get(br, &data->ip);
if (!mp)
goto out;
for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->key.port != port)
continue;
p->flags |= MDB_PG_FLAGS_OFFLOAD;
}
out:
spin_unlock_bh(&br->multicast_lock);
err:
kfree(priv);
}
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
const struct net_bridge_mdb_entry *mp)
{
if (mp->addr.proto == htons(ETH_P_IP))
ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
else if (mp->addr.proto == htons(ETH_P_IPV6))
ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
else
ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
mdb->vid = mp->addr.vid;
}
static void br_switchdev_host_mdb_one(struct net_device *dev,
struct net_device *lower_dev,
struct net_bridge_mdb_entry *mp,
int type)
{
struct switchdev_obj_port_mdb mdb = {
.obj = {
.id = SWITCHDEV_OBJ_ID_HOST_MDB,
.flags = SWITCHDEV_F_DEFER,
.orig_dev = dev,
},
};
br_switchdev_mdb_populate(&mdb, mp);
switch (type) {
case RTM_NEWMDB:
switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
break;
case RTM_DELMDB:
switchdev_port_obj_del(lower_dev, &mdb.obj);
break;
}
}
static void br_switchdev_host_mdb(struct net_device *dev,
struct net_bridge_mdb_entry *mp, int type)
{
struct net_device *lower_dev;
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower_dev, iter)
br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}
static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb,
unsigned long action, const void *ctx,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_port_obj_info obj_info = {
.info = {
.dev = dev,
.extack = extack,
.ctx = ctx,
},
.obj = &mdb->obj,
};
int err;
err = nb->notifier_call(nb, action, &obj_info);
return notifier_to_errno(err);
}
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
enum switchdev_obj_id id,
const struct net_bridge_mdb_entry *mp,
struct net_device *orig_dev)
{
struct switchdev_obj_port_mdb *mdb;
mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
if (!mdb)
return -ENOMEM;
mdb->obj.id = id;
mdb->obj.orig_dev = orig_dev;
br_switchdev_mdb_populate(mdb, mp);
list_add_tail(&mdb->obj.list, mdb_list);
return 0;
}
void br_switchdev_mdb_notify(struct net_device *dev,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
int type)
{
struct br_switchdev_mdb_complete_info *complete_info;
struct switchdev_obj_port_mdb mdb = {
.obj = {
.id = SWITCHDEV_OBJ_ID_PORT_MDB,
.flags = SWITCHDEV_F_DEFER,
},
};
if (!pg)
return br_switchdev_host_mdb(dev, mp, type);
br_switchdev_mdb_populate(&mdb, mp);
mdb.obj.orig_dev = pg->key.port->dev;
switch (type) {
case RTM_NEWMDB:
complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
if (!complete_info)
break;
complete_info->port = pg->key.port;
complete_info->ip = mp->addr;
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_switchdev_mdb_complete;
if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
kfree(complete_info);
break;
case RTM_DELMDB:
switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
break;
}
}
#endif
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
const void *ctx, bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
const struct net_bridge_mdb_entry *mp;
struct switchdev_obj *obj, *tmp;
struct net_bridge *br;
unsigned long action;
LIST_HEAD(mdb_list);
int err = 0;
ASSERT_RTNL();
if (!nb)
return 0;
if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
return -EINVAL;
br = netdev_priv(br_dev);
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
* because the write-side protection is br->multicast_lock. But we
* need to emulate the [ blocking ] calling context of a regular
* switchdev event, so since both br->multicast_lock and RCU read side
* critical sections are atomic, we have no choice but to pick the RCU
* read side lock, queue up all our events, leave the critical section
* and notify switchdev from blocking context.
*/
rcu_read_lock();
hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
struct net_bridge_port_group __rcu * const *pp;
const struct net_bridge_port_group *p;
if (mp->host_joined) {
err = br_switchdev_mdb_queue_one(&mdb_list,
SWITCHDEV_OBJ_ID_HOST_MDB,
mp, br_dev);
if (err) {
rcu_read_unlock();
goto out_free_mdb;
}
}
for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
if (p->key.port->dev != dev)
continue;
err = br_switchdev_mdb_queue_one(&mdb_list,
SWITCHDEV_OBJ_ID_PORT_MDB,
mp, dev);
if (err) {
rcu_read_unlock();
goto out_free_mdb;
}
}
}
rcu_read_unlock();
if (adding)
action = SWITCHDEV_PORT_OBJ_ADD;
else
action = SWITCHDEV_PORT_OBJ_DEL;
list_for_each_entry(obj, &mdb_list, list) {
err = br_switchdev_mdb_replay_one(nb, dev,
SWITCHDEV_OBJ_PORT_MDB(obj),
action, ctx, extack);
if (err == -EOPNOTSUPP)
err = 0;
if (err)
goto out_free_mdb;
}
out_free_mdb:
list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
list_del(&obj->list);
kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
}
if (err)
return err;
#endif
return 0;
}
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb,
struct netlink_ext_ack *extack)
{
struct net_device *br_dev = p->br->dev;
struct net_device *dev = p->dev;
int err;
err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
if (err && err != -EOPNOTSUPP)
return err;
err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
extack);
if (err) {
/* -EOPNOTSUPP not propagated from MDB replay. */
return err;
}
err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
if (err && err != -EOPNOTSUPP)
return err;
return 0;
}
static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb)
{
struct net_device *br_dev = p->br->dev;
struct net_device *dev = p->dev;
br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}
/* Let the bridge know that this port is offloaded, so that it can assign a
* switchdev hardware domain to it.
*/
int br_switchdev_port_offload(struct net_bridge_port *p,
struct net_device *dev, const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb,
bool tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct netdev_phys_item_id ppid;
int err;
err = dev_get_port_parent_id(dev, &ppid, false);
if (err)
return err;
err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
if (err)
return err;
err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
if (err)
goto out_switchdev_del;
return 0;
out_switchdev_del:
nbp_switchdev_del(p);
return err;
}
void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb)
{
nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);
nbp_switchdev_del(p);
}
int br_switchdev_port_replay(struct net_bridge_port *p,
struct net_device *dev, const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb,
struct netlink_ext_ack *extack)
{
return nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
}
| linux-master | net/bridge/br_switchdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Bridge Multiple Spanning Tree Support
*
* Authors:
* Tobias Waldekranz <[email protected]>
*/
#include <linux/kernel.h>
#include <net/switchdev.h>
#include "br_private.h"
DEFINE_STATIC_KEY_FALSE(br_mst_used);
bool br_mst_enabled(const struct net_device *dev)
{
if (!netif_is_bridge_master(dev))
return false;
return br_opt_get(netdev_priv(dev), BROPT_MST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_mst_enabled);
int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids)
{
const struct net_bridge_vlan_group *vg;
const struct net_bridge_vlan *v;
const struct net_bridge *br;
ASSERT_RTNL();
if (!netif_is_bridge_master(dev))
return -EINVAL;
br = netdev_priv(dev);
if (!br_opt_get(br, BROPT_MST_ENABLED))
return -EINVAL;
vg = br_vlan_group(br);
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (v->msti == msti)
__set_bit(v->vid, vids);
}
return 0;
}
EXPORT_SYMBOL_GPL(br_mst_get_info);
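/* Illustrative driver-side usage of br_mst_get_info() (a sketch, not taken
 * from an in-tree caller): collect the VIDs mapped to MSTI 100 on a bridge
 * master and program them into HW.
 *
 *   DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
 *
 *   if (!br_mst_get_info(br_dev, 100, vids))
 *           for_each_set_bit(vid, vids, VLAN_N_VID)
 *                   ...program the VID-to-MSTI mapping into HW...
 *
 * Must run under RTNL and only succeeds while MST is enabled on the bridge.
 */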
int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
{
const struct net_bridge_port *p = NULL;
const struct net_bridge_vlan_group *vg;
const struct net_bridge_vlan *v;
ASSERT_RTNL();
p = br_port_get_check_rtnl(dev);
if (!p || !br_opt_get(p->br, BROPT_MST_ENABLED))
return -EINVAL;
vg = nbp_vlan_group(p);
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (v->brvlan->msti == msti) {
*state = v->state;
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(br_mst_get_state);
static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
u8 state)
{
struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
if (v->state == state)
return;
br_vlan_set_state(v, state);
if (v->vid == vg->pvid)
br_vlan_set_pvid_state(vg, state);
}
int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.id = SWITCHDEV_ATTR_ID_PORT_MST_STATE,
.orig_dev = p->dev,
.u.mst_state = {
.msti = msti,
.state = state,
},
};
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
int err;
vg = nbp_vlan_group(p);
if (!vg)
return 0;
/* MSTI 0 (CST) state changes are notified via the regular
* SWITCHDEV_ATTR_ID_PORT_STP_STATE.
*/
if (msti) {
err = switchdev_port_attr_set(p->dev, &attr, extack);
if (err && err != -EOPNOTSUPP)
return err;
}
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (v->brvlan->msti != msti)
continue;
br_mst_vlan_set_state(p, v, state);
}
return 0;
}
static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
{
struct net_bridge_vlan_group *vg = nbp_vlan_group(pv->port);
struct net_bridge_vlan *v;
list_for_each_entry(v, &vg->vlan_list, vlist) {
/* If this port already has a defined state in this
* MSTI (through some other VLAN membership), inherit
* it.
*/
if (v != pv && v->brvlan->msti == msti) {
br_mst_vlan_set_state(pv->port, pv, v->state);
return;
}
}
/* Otherwise, start out in a new MSTI with all ports disabled. */
return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
}
int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
{
struct switchdev_attr attr = {
.id = SWITCHDEV_ATTR_ID_VLAN_MSTI,
.orig_dev = mv->br->dev,
.u.vlan_msti = {
.vid = mv->vid,
.msti = msti,
},
};
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *pv;
struct net_bridge_port *p;
int err;
if (mv->msti == msti)
return 0;
err = switchdev_port_attr_set(mv->br->dev, &attr, NULL);
if (err && err != -EOPNOTSUPP)
return err;
mv->msti = msti;
list_for_each_entry(p, &mv->br->port_list, list) {
vg = nbp_vlan_group(p);
pv = br_vlan_find(vg, mv->vid);
if (pv)
br_mst_vlan_sync_state(pv, msti);
}
return 0;
}
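/* MSTI mapping example (hypothetical values): moving VID 100 from the CST
 * (msti 0) to msti 7 notifies switchdev once for the bridge vlan and then
 * walks every port that has VID 100, inheriting the port state already used
 * for msti 7 if the port participates in it through another VLAN, or
 * starting the port out as BR_STATE_DISABLED in the new MSTI otherwise.
 */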
void br_mst_vlan_init_state(struct net_bridge_vlan *v)
{
/* VLANs always start out in MSTI 0 (CST) */
v->msti = 0;
if (br_vlan_is_master(v))
v->state = BR_STATE_FORWARDING;
else
v->state = v->port->state;
}
int br_mst_set_enabled(struct net_bridge *br, bool on,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.id = SWITCHDEV_ATTR_ID_BRIDGE_MST,
.orig_dev = br->dev,
.u.mst = on,
};
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
int err;
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
if (!vg->num_vlans)
continue;
NL_SET_ERR_MSG(extack,
"MST mode can't be changed while VLANs exist");
return -EBUSY;
}
if (br_opt_get(br, BROPT_MST_ENABLED) == on)
return 0;
err = switchdev_port_attr_set(br->dev, &attr, extack);
if (err && err != -EOPNOTSUPP)
return err;
if (on)
static_branch_enable(&br_mst_used);
else
static_branch_disable(&br_mst_used);
br_opt_toggle(br, BROPT_MST_ENABLED, on);
return 0;
}
size_t br_mst_info_size(const struct net_bridge_vlan_group *vg)
{
DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 };
const struct net_bridge_vlan *v;
size_t sz;
/* IFLA_BRIDGE_MST */
sz = nla_total_size(0);
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (test_bit(v->brvlan->msti, seen))
continue;
/* IFLA_BRIDGE_MST_ENTRY */
sz += nla_total_size(0) +
/* IFLA_BRIDGE_MST_ENTRY_MSTI */
nla_total_size(sizeof(u16)) +
/* IFLA_BRIDGE_MST_ENTRY_STATE */
nla_total_size(sizeof(u8));
__set_bit(v->brvlan->msti, seen);
}
return sz;
}
int br_mst_fill_info(struct sk_buff *skb,
const struct net_bridge_vlan_group *vg)
{
DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 };
const struct net_bridge_vlan *v;
struct nlattr *nest;
int err = 0;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (test_bit(v->brvlan->msti, seen))
continue;
nest = nla_nest_start_noflag(skb, IFLA_BRIDGE_MST_ENTRY);
if (!nest ||
nla_put_u16(skb, IFLA_BRIDGE_MST_ENTRY_MSTI, v->brvlan->msti) ||
nla_put_u8(skb, IFLA_BRIDGE_MST_ENTRY_STATE, v->state)) {
err = -EMSGSIZE;
break;
}
nla_nest_end(skb, nest);
__set_bit(v->brvlan->msti, seen);
}
return err;
}
static const struct nla_policy br_mst_nl_policy[IFLA_BRIDGE_MST_ENTRY_MAX + 1] = {
[IFLA_BRIDGE_MST_ENTRY_MSTI] = NLA_POLICY_RANGE(NLA_U16,
1, /* 0 reserved for CST */
VLAN_N_VID - 1),
[IFLA_BRIDGE_MST_ENTRY_STATE] = NLA_POLICY_RANGE(NLA_U8,
BR_STATE_DISABLED,
BR_STATE_BLOCKING),
};
static int br_mst_process_one(struct net_bridge_port *p,
const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MST_ENTRY_MAX + 1];
u16 msti;
u8 state;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MST_ENTRY_MAX, attr,
br_mst_nl_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MST_ENTRY_MSTI]) {
NL_SET_ERR_MSG_MOD(extack, "MSTI not specified");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_MST_ENTRY_STATE]) {
NL_SET_ERR_MSG_MOD(extack, "State not specified");
return -EINVAL;
}
msti = nla_get_u16(tb[IFLA_BRIDGE_MST_ENTRY_MSTI]);
state = nla_get_u8(tb[IFLA_BRIDGE_MST_ENTRY_STATE]);
return br_mst_set_state(p, msti, state, extack);
}
int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr,
struct netlink_ext_ack *extack)
{
struct nlattr *attr;
int err, msts = 0;
int rem;
if (!br_opt_get(p->br, BROPT_MST_ENABLED)) {
NL_SET_ERR_MSG_MOD(extack, "Can't modify MST state when MST is disabled");
return -EBUSY;
}
nla_for_each_nested(attr, mst_attr, rem) {
switch (nla_type(attr)) {
case IFLA_BRIDGE_MST_ENTRY:
err = br_mst_process_one(p, attr, extack);
break;
default:
continue;
}
msts++;
if (err)
break;
}
if (!msts) {
NL_SET_ERR_MSG_MOD(extack, "Found no MST entries to process");
err = -EINVAL;
}
return err;
}
| linux-master | net/bridge/br_mst.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Spanning tree protocol; timer-related code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/times.h>
#include "br_private.h"
#include "br_private_stp.h"
/* called under bridge lock */
static int br_is_designated_for_some_port(const struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
!memcmp(&p->designated_bridge, &br->bridge_id, 8))
return 1;
}
return 0;
}
static void br_hello_timer_expired(struct timer_list *t)
{
struct net_bridge *br = from_timer(br, t, hello_timer);
br_debug(br, "hello timer expired\n");
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
if (br->stp_enabled == BR_KERNEL_STP)
mod_timer(&br->hello_timer,
round_jiffies(jiffies + br->hello_time));
}
spin_unlock(&br->lock);
}
static void br_message_age_timer_expired(struct timer_list *t)
{
struct net_bridge_port *p = from_timer(p, t, message_age_timer);
struct net_bridge *br = p->br;
const bridge_id *id = &p->designated_bridge;
int was_root;
if (p->state == BR_STATE_DISABLED)
return;
br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
(unsigned int) p->port_no, p->dev->name,
id->prio[0], id->prio[1], &id->addr);
/*
* According to the spec, the message age timer cannot be
	 * running when we are the root bridge, so this was_root
	 * check is redundant. I'm leaving it in for now, though.
*/
spin_lock(&br->lock);
if (p->state == BR_STATE_DISABLED)
goto unlock;
was_root = br_is_root_bridge(br);
br_become_designated_port(p);
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !was_root)
br_become_root_bridge(br);
unlock:
spin_unlock(&br->lock);
}
static void br_forward_delay_timer_expired(struct timer_list *t)
{
struct net_bridge_port *p = from_timer(p, t, forward_delay_timer);
struct net_bridge *br = p->br;
br_debug(br, "port %u(%s) forward delay timer\n",
(unsigned int) p->port_no, p->dev->name);
spin_lock(&br->lock);
if (p->state == BR_STATE_LISTENING) {
br_set_state(p, BR_STATE_LEARNING);
mod_timer(&p->forward_delay_timer,
jiffies + br->forward_delay);
} else if (p->state == BR_STATE_LEARNING) {
br_set_state(p, BR_STATE_FORWARDING);
if (br_is_designated_for_some_port(br))
br_topology_change_detection(br);
netif_carrier_on(br->dev);
}
rcu_read_lock();
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
rcu_read_unlock();
spin_unlock(&br->lock);
}
static void br_tcn_timer_expired(struct timer_list *t)
{
struct net_bridge *br = from_timer(br, t, tcn_timer);
br_debug(br, "tcn timer expired\n");
spin_lock(&br->lock);
if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
br_transmit_tcn(br);
mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
}
spin_unlock(&br->lock);
}
static void br_topology_change_timer_expired(struct timer_list *t)
{
struct net_bridge *br = from_timer(br, t, topology_change_timer);
br_debug(br, "topo change timer expired\n");
spin_lock(&br->lock);
br->topology_change_detected = 0;
__br_set_topology_change(br, 0);
spin_unlock(&br->lock);
}
static void br_hold_timer_expired(struct timer_list *t)
{
struct net_bridge_port *p = from_timer(p, t, hold_timer);
br_debug(p->br, "port %u(%s) hold timer expired\n",
(unsigned int) p->port_no, p->dev->name);
spin_lock(&p->br->lock);
if (p->config_pending)
br_transmit_config(p);
spin_unlock(&p->br->lock);
}
void br_stp_timer_init(struct net_bridge *br)
{
timer_setup(&br->hello_timer, br_hello_timer_expired, 0);
timer_setup(&br->tcn_timer, br_tcn_timer_expired, 0);
timer_setup(&br->topology_change_timer,
br_topology_change_timer_expired, 0);
}
void br_stp_port_timer_init(struct net_bridge_port *p)
{
timer_setup(&p->message_age_timer, br_message_age_timer_expired, 0);
timer_setup(&p->forward_delay_timer, br_forward_delay_timer_expired, 0);
timer_setup(&p->hold_timer, br_hold_timer_expired, 0);
}
/* Report ticks left (in USER_HZ) used for API */
unsigned long br_timer_value(const struct timer_list *timer)
{
return timer_pending(timer)
? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
}
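/* Unit note: the value above is a clock_t delta in USER_HZ ticks (via
 * jiffies_delta_to_clock_t()), which is what the sysfs/ioctl consumers
 * expect; e.g. a hello timer with 2 s remaining is reported as 200 when
 * USER_HZ is 100.
 */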
| linux-master | net/bridge/br_stp_timer.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, Nikolay Aleksandrov <[email protected]>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/ip_tunnels.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
struct nlattr *nest;
if (!v->tinfo.tunnel_dst)
return true;
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
if (!nest)
return false;
if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
nla_nest_cancel(skb, nest);
return false;
}
nla_nest_end(skb, nest);
return true;
}
static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end)
{
return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
vlan_tunid_inrange(v_curr, range_end);
}
/* check if the options and state of v_curr allow it to enter the range */
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end)
{
u8 range_mc_rtr = br_vlan_multicast_router(range_end);
u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);
return v_curr->state == range_end->state &&
__vlan_tun_can_enter_range(v_curr, range_end) &&
curr_mc_rtr == range_mc_rtr;
}
bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v,
const struct net_bridge_port *p)
{
if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
!__vlan_tun_put(skb, v) ||
nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS,
!!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED)))
return false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
br_vlan_multicast_router(v)))
return false;
if (p && !br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx) &&
(nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
br_multicast_ngroups_get(&v->port_mcast_ctx)) ||
nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
br_multicast_ngroups_get_max(&v->port_mcast_ctx))))
return false;
#endif
return true;
}
size_t br_vlan_opts_nl_size(void)
{
return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS */
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS */
#endif
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS */
+ 0;
}
static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *v,
u8 state,
bool *changed,
struct netlink_ext_ack *extack)
{
struct net_bridge *br;
ASSERT_RTNL();
if (state > BR_STATE_BLOCKING) {
NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
return -EINVAL;
}
if (br_vlan_is_brentry(v))
br = v->br;
else
br = v->port->br;
if (br->stp_enabled == BR_KERNEL_STP) {
NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
return -EBUSY;
}
if (br_opt_get(br, BROPT_MST_ENABLED)) {
NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state directly when MST is enabled");
return -EBUSY;
}
if (v->state == state)
return 0;
if (v->vid == br_get_pvid(vg))
br_vlan_set_pvid_state(vg, state);
br_vlan_set_state(v, state);
*changed = true;
return 0;
}
static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
[BRIDGE_VLANDB_TINFO_ID] = { .type = NLA_U32 },
[BRIDGE_VLANDB_TINFO_CMD] = { .type = NLA_U32 },
};
static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
struct net_bridge_vlan *v,
struct nlattr **tb,
bool *changed,
struct netlink_ext_ack *extack)
{
struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
struct bridge_vlan_info *vinfo;
u32 tun_id = 0;
int cmd, err;
if (!p) {
NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
return -EINVAL;
}
if (!(p->flags & BR_VLAN_TUNNEL)) {
NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
return -EINVAL;
}
attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
br_vlandb_tinfo_pol, extack);
if (err)
return err;
if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
return -ENOENT;
}
cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
switch (cmd) {
case RTM_SETLINK:
if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
return -ENOENT;
}
/* when working on vlan ranges this is the starting tunnel id */
tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
/* tunnel ids are mapped to each vlan in increasing order,
* the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
* current vlan, so we compute: tun_id + v->vid - vinfo->vid
*/
tun_id += v->vid - vinfo->vid;
break;
case RTM_DELLINK:
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
return -EINVAL;
}
return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
}
static int br_vlan_process_one_opts(const struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *v,
struct nlattr **tb,
bool *changed,
struct netlink_ext_ack *extack)
{
int err;
*changed = false;
if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);
err = br_vlan_modify_state(vg, v, state, changed, extack);
if (err)
return err;
}
if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
if (err)
return err;
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]) {
u8 val;
val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]);
err = br_multicast_set_vlan_router(v, val);
if (err)
return err;
*changed = true;
}
if (tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]) {
u32 val;
if (!p) {
NL_SET_ERR_MSG_MOD(extack, "Can't set mcast_max_groups for non-port vlans");
return -EINVAL;
}
if (br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx)) {
NL_SET_ERR_MSG_MOD(extack, "Multicast snooping disabled on this VLAN");
return -EINVAL;
}
val = nla_get_u32(tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]);
br_multicast_ngroups_set_max(&v->port_mcast_ctx, val);
*changed = true;
}
#endif
if (tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]) {
bool enabled = v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED;
bool val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]);
if (!p) {
NL_SET_ERR_MSG_MOD(extack, "Can't set neigh_suppress for non-port vlans");
return -EINVAL;
}
if (val != enabled) {
v->priv_flags ^= BR_VLFLAG_NEIGH_SUPPRESS_ENABLED;
*changed = true;
}
}
return 0;
}
int br_vlan_process_options(const struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_vlan *range_start,
struct net_bridge_vlan *range_end,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
struct net_bridge_vlan_group *vg;
int vid, err = 0;
u16 pvid;
if (p)
vg = nbp_vlan_group(p);
else
vg = br_vlan_group(br);
if (!range_start || !br_vlan_should_use(range_start)) {
NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
return -ENOENT;
}
if (!range_end || !br_vlan_should_use(range_end)) {
NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
return -ENOENT;
}
pvid = br_get_pvid(vg);
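/* Walk the whole vlan range, apply the options to each vlan and coalesce
 * consecutive changed vlans into as few RTM_NEWVLAN notifications as
 * possible.
 */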
for (vid = range_start->vid; vid <= range_end->vid; vid++) {
bool changed = false;
v = br_vlan_find(vg, vid);
if (!v || !br_vlan_should_use(v)) {
NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
err = -ENOENT;
break;
}
err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
extack);
if (err)
break;
if (changed) {
/* vlan options changed, check for range */
if (!curr_start) {
curr_start = v;
curr_end = v;
continue;
}
if (v->vid == pvid ||
!br_vlan_can_enter_range(v, curr_end)) {
br_vlan_notify(br, p, curr_start->vid,
curr_end->vid, RTM_NEWVLAN);
curr_start = v;
}
curr_end = v;
} else {
/* nothing changed and nothing to notify yet */
if (!curr_start)
continue;
br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
RTM_NEWVLAN);
curr_start = NULL;
curr_end = NULL;
}
}
if (curr_start)
br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
RTM_NEWVLAN);
return err;
}
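/* Global vlan options can only be kept in one range entry if the vids are
 * consecutive and the msti, the global mcast enabled flag and the multicast
 * context options are all equal.
 */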
bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *r_end)
{
return v_curr->vid - r_end->vid == 1 &&
v_curr->msti == r_end->msti &&
((v_curr->priv_flags ^ r_end->priv_flags) &
BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
&r_end->br_mcast_ctx);
}
bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
const struct net_bridge_vlan *v_opts)
{
struct nlattr *nest2 __maybe_unused;
u64 clockval __maybe_unused;
struct nlattr *nest;
nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
if (!nest)
return false;
if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
goto out_err;
if (vid_range && vid < vid_range &&
nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
goto out_err;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
!!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) ||
nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
v_opts->br_mcast_ctx.multicast_igmp_version) ||
nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
v_opts->br_mcast_ctx.multicast_last_member_count) ||
nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
v_opts->br_mcast_ctx.multicast_startup_query_count) ||
nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
v_opts->br_mcast_ctx.multicast_querier) ||
br_multicast_dump_querier_state(skb, &v_opts->br_mcast_ctx,
BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
clockval, BRIDGE_VLANDB_GOPTS_PAD))
goto out_err;
if (br_rports_have_mc_router(&v_opts->br_mcast_ctx)) {
nest2 = nla_nest_start(skb,
BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS);
if (!nest2)
goto out_err;
rcu_read_lock();
if (br_rports_fill_info(skb, &v_opts->br_mcast_ctx)) {
rcu_read_unlock();
nla_nest_cancel(skb, nest2);
goto out_err;
}
rcu_read_unlock();
nla_nest_end(skb, nest2);
}
#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
v_opts->br_mcast_ctx.multicast_mld_version))
goto out_err;
#endif
#endif
if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_MSTI, v_opts->msti))
goto out_err;
nla_nest_end(skb, nest);
return true;
out_err:
nla_nest_cancel(skb, nest);
return false;
}
static size_t rtnl_vlan_global_opts_nlmsg_size(const struct net_bridge_vlan *v)
{
return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
+ nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */
+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */
+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */
+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */
+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER */
+ br_multicast_querier_state_size() /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE */
+ nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
+ br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
#endif
+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_MSTI */
+ nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
}
static void br_vlan_global_opts_notify(const struct net_bridge *br,
u16 vid, u16 vid_range)
{
struct net_bridge_vlan *v;
struct br_vlan_msg *bvm;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err = -ENOBUFS;
/* right now notifications are done only with rtnl held */
ASSERT_RTNL();
/* need to find the vlan due to flags/options */
v = br_vlan_find(br_vlan_group(br), vid);
if (!v)
return;
skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(v), GFP_KERNEL);
if (!skb)
goto out_err;
err = -EMSGSIZE;
nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0);
if (!nlh)
goto out_err;
bvm = nlmsg_data(nlh);
memset(bvm, 0, sizeof(*bvm));
bvm->family = AF_BRIDGE;
bvm->ifindex = br->dev->ifindex;
if (!br_vlan_global_opts_fill(skb, vid, vid_range, v))
goto out_err;
nlmsg_end(skb, nlh);
rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
return;
out_err:
rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err);
kfree_skb(skb);
}
static int br_vlan_process_global_one_opts(const struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *v,
struct nlattr **tb,
bool *changed,
struct netlink_ext_ack *extack)
{
int err __maybe_unused;
*changed = false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) {
u8 mc_snooping;
mc_snooping = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]);
if (br_multicast_toggle_global_vlan(v, !!mc_snooping))
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) {
u8 ver;
ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]);
err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver);
if (err)
return err;
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) {
u32 cnt;
cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]);
v->br_mcast_ctx.multicast_last_member_count = cnt;
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) {
u32 cnt;
cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]);
v->br_mcast_ctx.multicast_startup_query_count = cnt;
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]);
v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]);
v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]);
v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]);
v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) {
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
u8 val;
val = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]);
err = br_multicast_set_querier(&v->br_mcast_ctx, val);
if (err)
return err;
*changed = true;
}
#if IS_ENABLED(CONFIG_IPV6)
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) {
u8 ver;
ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]);
err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver);
if (err)
return err;
*changed = true;
}
#endif
#endif
if (tb[BRIDGE_VLANDB_GOPTS_MSTI]) {
u16 msti;
msti = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_MSTI]);
err = br_mst_vlan_set_msti(v, msti);
if (err)
return err;
*changed = true;
}
return 0;
}
static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
[BRIDGE_VLANDB_GOPTS_ID] = { .type = NLA_U16 },
[BRIDGE_VLANDB_GOPTS_RANGE] = { .type = NLA_U16 },
[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING] = { .type = NLA_U8 },
[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION] = { .type = NLA_U8 },
[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER] = { .type = NLA_U8 },
[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
[BRIDGE_VLANDB_GOPTS_MSTI] = NLA_POLICY_MAX(NLA_U16, VLAN_N_VID - 1),
};
int br_vlan_rtm_process_global_options(struct net_device *dev,
const struct nlattr *attr,
int cmd,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1];
struct net_bridge_vlan_group *vg;
u16 vid, vid_range = 0;
struct net_bridge *br;
int err = 0;
if (cmd != RTM_NEWVLAN) {
NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation");
return -EINVAL;
}
if (!netif_is_bridge_master(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device");
return -EINVAL;
}
br = netdev_priv(dev);
vg = br_vlan_group(br);
if (WARN_ON(!vg))
return -ENODEV;
err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr,
br_vlan_db_gpol, extack);
if (err)
return err;
if (!tb[BRIDGE_VLANDB_GOPTS_ID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id");
return -EINVAL;
}
vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]);
if (!br_vlan_valid_id(vid, extack))
return -EINVAL;
if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) {
vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]);
if (!br_vlan_valid_id(vid_range, extack))
return -EINVAL;
if (vid >= vid_range) {
NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
return -EINVAL;
}
} else {
vid_range = vid;
}
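/* Apply the global options to every vlan in the requested range and send
 * notifications in batches of consecutive vlans with equal resulting
 * options.
 */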
for (; vid <= vid_range; vid++) {
bool changed = false;
v = br_vlan_find(vg, vid);
if (!v) {
NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options");
err = -ENOENT;
break;
}
err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed,
extack);
if (err)
break;
if (changed) {
/* vlan options changed, check for range */
if (!curr_start) {
curr_start = v;
curr_end = v;
continue;
}
if (!br_vlan_global_opts_can_enter_range(v, curr_end)) {
br_vlan_global_opts_notify(br, curr_start->vid,
curr_end->vid);
curr_start = v;
}
curr_end = v;
} else {
/* nothing changed and nothing to notify yet */
if (!curr_start)
continue;
br_vlan_global_opts_notify(br, curr_start->vid,
curr_end->vid);
curr_start = NULL;
curr_end = NULL;
}
}
if (curr_start)
br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid);
return err;
}
| linux-master | net/bridge/br_vlan_options.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"
static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
static struct br_frame_type mrp_frame_type __read_mostly = {
.type = cpu_to_be16(ETH_P_MRP),
.frame_handler = br_mrp_process,
};
static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
struct net_bridge_port *s_port,
struct net_bridge_port *port)
{
if (port == p_port ||
port == s_port)
return true;
return false;
}
static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
struct net_bridge_port *port)
{
if (port == i_port)
return true;
return false;
}
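/* Look up a bridge port by the ifindex of its netdevice */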
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
u32 ifindex)
{
struct net_bridge_port *res = NULL;
struct net_bridge_port *port;
list_for_each_entry(port, &br->port_list, list) {
if (port->dev->ifindex == ifindex) {
res = port;
break;
}
}
return res;
}
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
struct br_mrp *res = NULL;
struct br_mrp *mrp;
hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
lockdep_rtnl_is_held()) {
if (mrp->ring_id == ring_id) {
res = mrp;
break;
}
}
return res;
}
static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
struct br_mrp *res = NULL;
struct br_mrp *mrp;
hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
lockdep_rtnl_is_held()) {
if (mrp->in_id == in_id) {
res = mrp;
break;
}
}
return res;
}
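/* Return true if the ifindex is not already used as a ring or interconnect
 * port by any MRP instance on this bridge.
 */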
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
struct br_mrp *mrp;
hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
lockdep_rtnl_is_held()) {
struct net_bridge_port *p;
p = rtnl_dereference(mrp->p_port);
if (p && p->dev->ifindex == ifindex)
return false;
p = rtnl_dereference(mrp->s_port);
if (p && p->dev->ifindex == ifindex)
return false;
p = rtnl_dereference(mrp->i_port);
if (p && p->dev->ifindex == ifindex)
return false;
}
return true;
}
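/* Find the MRP instance that uses the given port as its primary, secondary
 * or interconnect port.
 */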
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
struct net_bridge_port *p)
{
struct br_mrp *res = NULL;
struct br_mrp *mrp;
hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
lockdep_rtnl_is_held()) {
if (rcu_access_pointer(mrp->p_port) == p ||
rcu_access_pointer(mrp->s_port) == p ||
rcu_access_pointer(mrp->i_port) == p) {
res = mrp;
break;
}
}
return res;
}
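/* Return the next sequence id to be placed in the common TLV of the
 * generated MRP frames.
 */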
static int br_mrp_next_seq(struct br_mrp *mrp)
{
mrp->seq_id++;
return mrp->seq_id;
}
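/* Allocate an MRP frame and fill in the Ethernet header and the 16 bit MRP
 * version field; the callers append the TLVs.
 */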
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
const u8 *src, const u8 *dst)
{
struct ethhdr *eth_hdr;
struct sk_buff *skb;
__be16 *version;
skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
if (!skb)
return NULL;
skb->dev = p->dev;
skb->protocol = htons(ETH_P_MRP);
skb->priority = MRP_FRAME_PRIO;
skb_reserve(skb, sizeof(*eth_hdr));
eth_hdr = skb_push(skb, sizeof(*eth_hdr));
ether_addr_copy(eth_hdr->h_dest, dst);
ether_addr_copy(eth_hdr->h_source, src);
eth_hdr->h_proto = htons(ETH_P_MRP);
version = skb_put(skb, sizeof(*version));
*version = cpu_to_be16(MRP_VERSION);
return skb;
}
static void br_mrp_skb_tlv(struct sk_buff *skb,
enum br_mrp_tlv_header_type type,
u8 length)
{
struct br_mrp_tlv_hdr *hdr;
hdr = skb_put(skb, sizeof(*hdr));
hdr->type = type;
hdr->length = length;
}
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
struct br_mrp_common_hdr *hdr;
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));
hdr = skb_put(skb, sizeof(*hdr));
hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
struct net_bridge_port *p,
enum br_mrp_port_role_type port_role)
{
struct br_mrp_ring_test_hdr *hdr = NULL;
struct sk_buff *skb = NULL;
if (!p)
return NULL;
skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
if (!skb)
return NULL;
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
hdr = skb_put(skb, sizeof(*hdr));
hdr->prio = cpu_to_be16(mrp->prio);
ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
hdr->port_role = cpu_to_be16(port_role);
hdr->state = cpu_to_be16(mrp->ring_state);
hdr->transitions = cpu_to_be16(mrp->ring_transitions);
hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
br_mrp_skb_common(skb, mrp);
/* In case the node behaves as MRA, the Test frame needs to have
* an Option TLV which includes a sub-option TLV of type
* AUTO_MGR
*/
if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
struct br_mrp_sub_option1_hdr *sub_opt = NULL;
struct br_mrp_tlv_hdr *sub_tlv = NULL;
struct br_mrp_oui_hdr *oui = NULL;
u8 length;
length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(oui) +
MRP_OPT_PADDING;
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);
oui = skb_put(skb, sizeof(*oui));
memset(oui, 0x0, sizeof(*oui));
sub_opt = skb_put(skb, sizeof(*sub_opt));
memset(sub_opt, 0x0, sizeof(*sub_opt));
sub_tlv = skb_put(skb, sizeof(*sub_tlv));
sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;
/* 32 bit alignment shall be ensured, therefore add 2 bytes of padding */
skb_put(skb, MRP_OPT_PADDING);
}
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
return skb;
}
static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
struct net_bridge_port *p,
enum br_mrp_port_role_type port_role)
{
struct br_mrp_in_test_hdr *hdr = NULL;
struct sk_buff *skb = NULL;
if (!p)
return NULL;
skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
if (!skb)
return NULL;
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
hdr = skb_put(skb, sizeof(*hdr));
hdr->id = cpu_to_be16(mrp->in_id);
ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
hdr->port_role = cpu_to_be16(port_role);
hdr->state = cpu_to_be16(mrp->in_state);
hdr->transitions = cpu_to_be16(mrp->in_transitions);
hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
br_mrp_skb_common(skb, mrp);
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
return skb;
}
/* This function is called continuously in the following cases:
* - when the node role is MRM, in this case test_monitor is always set to false
* because the node needs to notify the userspace that the ring is open and
* needs to send MRP_Test frames
* - when the node role is MRA, there are 2 subcases:
* - when the MRA behaves as MRM, this case is similar to the MRM role
* - when the MRA behaves as MRC, in this case test_monitor is set to true,
* because the node needs to detect when it stops seeing MRP_Test frames
* from the MRM node but it doesn't need to send MRP_Test frames.
*/
static void br_mrp_test_work_expired(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
struct net_bridge_port *p;
bool notify_open = false;
struct sk_buff *skb;
if (time_before_eq(mrp->test_end, jiffies))
return;
if (mrp->test_count_miss < mrp->test_max_miss) {
mrp->test_count_miss++;
} else {
/* Notify that the ring is open only if the ring state is
* closed, otherwise it would continue to notify at every
* interval.
* Also notify that the ring is open when the node has the
* role MRA and behaves as MRC. The reason is that the
* userspace needs to know when the MRM stopped sending
* MRP_Test frames so that the current node can try to take
* the role of MRM.
*/
if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
mrp->test_monitor)
notify_open = true;
}
rcu_read_lock();
p = rcu_dereference(mrp->p_port);
if (p) {
if (!mrp->test_monitor) {
skb = br_mrp_alloc_test_skb(mrp, p,
BR_MRP_PORT_ROLE_PRIMARY);
if (!skb)
goto out;
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
if (notify_open && !mrp->ring_role_offloaded)
br_mrp_ring_port_open(p->dev, true);
}
p = rcu_dereference(mrp->s_port);
if (p) {
if (!mrp->test_monitor) {
skb = br_mrp_alloc_test_skb(mrp, p,
BR_MRP_PORT_ROLE_SECONDARY);
if (!skb)
goto out;
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
if (notify_open && !mrp->ring_role_offloaded)
br_mrp_ring_port_open(p->dev, true);
}
out:
rcu_read_unlock();
queue_delayed_work(system_wq, &mrp->test_work,
usecs_to_jiffies(mrp->test_interval));
}
/* This function is called continuously when the node has the interconnect role
* MIM. It generates interconnect test frames and sends them on all 3
* ports, and it also checks whether it stopped receiving interconnect test
* frames.
*/
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
struct net_bridge_port *p;
bool notify_open = false;
struct sk_buff *skb;
if (time_before_eq(mrp->in_test_end, jiffies))
return;
if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
mrp->in_test_count_miss++;
} else {
/* Notify that the interconnect ring is open only if the
* interconnect ring state is closed, otherwise it would
* continue to notify at every interval.
*/
if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
notify_open = true;
}
rcu_read_lock();
p = rcu_dereference(mrp->p_port);
if (p) {
skb = br_mrp_alloc_in_test_skb(mrp, p,
BR_MRP_PORT_ROLE_PRIMARY);
if (!skb)
goto out;
skb_reset_network_header(skb);
dev_queue_xmit(skb);
if (notify_open && !mrp->in_role_offloaded)
br_mrp_in_port_open(p->dev, true);
}
p = rcu_dereference(mrp->s_port);
if (p) {
skb = br_mrp_alloc_in_test_skb(mrp, p,
BR_MRP_PORT_ROLE_SECONDARY);
if (!skb)
goto out;
skb_reset_network_header(skb);
dev_queue_xmit(skb);
if (notify_open && !mrp->in_role_offloaded)
br_mrp_in_port_open(p->dev, true);
}
p = rcu_dereference(mrp->i_port);
if (p) {
skb = br_mrp_alloc_in_test_skb(mrp, p,
BR_MRP_PORT_ROLE_INTER);
if (!skb)
goto out;
skb_reset_network_header(skb);
dev_queue_xmit(skb);
if (notify_open && !mrp->in_role_offloaded)
br_mrp_in_port_open(p->dev, true);
}
out:
rcu_read_unlock();
queue_delayed_work(system_wq, &mrp->in_test_work,
usecs_to_jiffies(mrp->in_test_interval));
}
/* Deletes the MRP instance.
* note: called under rtnl_lock
*/
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
struct net_bridge_port *p;
u8 state;
/* Stop sending MRP_Test frames */
cancel_delayed_work_sync(&mrp->test_work);
br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
/* Stop sending MRP_InTest frames if the node has an interconnect role */
cancel_delayed_work_sync(&mrp->in_test_work);
br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
/* Disable the roles */
br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED);
p = rtnl_dereference(mrp->i_port);
if (p)
br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id,
BR_MRP_IN_ROLE_DISABLED);
br_mrp_switchdev_del(br, mrp);
/* Reset the ports */
p = rtnl_dereference(mrp->p_port);
if (p) {
spin_lock_bh(&br->lock);
state = netif_running(br->dev) ?
BR_STATE_FORWARDING : BR_STATE_DISABLED;
p->state = state;
p->flags &= ~BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
br_mrp_port_switchdev_set_state(p, state);
rcu_assign_pointer(mrp->p_port, NULL);
}
p = rtnl_dereference(mrp->s_port);
if (p) {
spin_lock_bh(&br->lock);
state = netif_running(br->dev) ?
BR_STATE_FORWARDING : BR_STATE_DISABLED;
p->state = state;
p->flags &= ~BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
br_mrp_port_switchdev_set_state(p, state);
rcu_assign_pointer(mrp->s_port, NULL);
}
p = rtnl_dereference(mrp->i_port);
if (p) {
spin_lock_bh(&br->lock);
state = netif_running(br->dev) ?
BR_STATE_FORWARDING : BR_STATE_DISABLED;
p->state = state;
p->flags &= ~BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
br_mrp_port_switchdev_set_state(p, state);
rcu_assign_pointer(mrp->i_port, NULL);
}
hlist_del_rcu(&mrp->list);
kfree_rcu(mrp, rcu);
if (hlist_empty(&br->mrp_list))
br_del_frame(br, &mrp_frame_type);
}
/* Adds a new MRP instance.
* note: called under rtnl_lock
*/
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
struct net_bridge_port *p;
struct br_mrp *mrp;
int err;
/* If the ring exists, it is not possible to create another one with the
* same ring_id
*/
mrp = br_mrp_find_id(br, instance->ring_id);
if (mrp)
return -EINVAL;
if (!br_mrp_get_port(br, instance->p_ifindex) ||
!br_mrp_get_port(br, instance->s_ifindex))
return -EINVAL;
/* It is not possible to have the same port part of multiple rings */
if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
!br_mrp_unique_ifindex(br, instance->s_ifindex))
return -EINVAL;
mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
if (!mrp)
return -ENOMEM;
mrp->ring_id = instance->ring_id;
mrp->prio = instance->prio;
p = br_mrp_get_port(br, instance->p_ifindex);
spin_lock_bh(&br->lock);
p->state = BR_STATE_FORWARDING;
p->flags |= BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
rcu_assign_pointer(mrp->p_port, p);
p = br_mrp_get_port(br, instance->s_ifindex);
spin_lock_bh(&br->lock);
p->state = BR_STATE_FORWARDING;
p->flags |= BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
rcu_assign_pointer(mrp->s_port, p);
if (hlist_empty(&br->mrp_list))
br_add_frame(br, &mrp_frame_type);
INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
hlist_add_tail_rcu(&mrp->list, &br->mrp_list);
err = br_mrp_switchdev_add(br, mrp);
if (err)
goto delete_mrp;
return 0;
delete_mrp:
br_mrp_del_impl(br, mrp);
return err;
}
/* Deletes the MRP instance that the port is part of
* note: called under rtnl_lock
*/
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
struct br_mrp *mrp = br_mrp_find_port(br, p);
/* If the port is not part of a MRP instance just bail out */
if (!mrp)
return;
br_mrp_del_impl(br, mrp);
}
/* Deletes existing MRP instance based on ring_id
* note: called under rtnl_lock
*/
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);
if (!mrp)
return -EINVAL;
br_mrp_del_impl(br, mrp);
return 0;
}
/* Set port state, port state can be forwarding, blocked or disabled
* note: already called with rtnl_lock
*/
int br_mrp_set_port_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
{
u32 port_state;
if (!p || !(p->flags & BR_MRP_AWARE))
return -EINVAL;
spin_lock_bh(&p->br->lock);
if (state == BR_MRP_PORT_STATE_FORWARDING)
port_state = BR_STATE_FORWARDING;
else
port_state = BR_STATE_BLOCKING;
p->state = port_state;
spin_unlock_bh(&p->br->lock);
br_mrp_port_switchdev_set_state(p, port_state);
return 0;
}
/* Set port role, port role can be primary or secondary
* note: already called with rtnl_lock
*/
int br_mrp_set_port_role(struct net_bridge_port *p,
enum br_mrp_port_role_type role)
{
struct br_mrp *mrp;
if (!p || !(p->flags & BR_MRP_AWARE))
return -EINVAL;
mrp = br_mrp_find_port(p->br, p);
if (!mrp)
return -EINVAL;
switch (role) {
case BR_MRP_PORT_ROLE_PRIMARY:
rcu_assign_pointer(mrp->p_port, p);
break;
case BR_MRP_PORT_ROLE_SECONDARY:
rcu_assign_pointer(mrp->s_port, p);
break;
default:
return -EINVAL;
}
br_mrp_port_switchdev_set_role(p, role);
return 0;
}
/* Set ring state, ring state can be only Open or Closed
* note: already called with rtnl_lock
*/
int br_mrp_set_ring_state(struct net_bridge *br,
struct br_mrp_ring_state *state)
{
struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);
if (!mrp)
return -EINVAL;
if (mrp->ring_state != state->ring_state)
mrp->ring_transitions++;
mrp->ring_state = state->ring_state;
br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);
return 0;
}
/* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
* MRC(Media Redundancy Client).
* note: already called with rtnl_lock
*/
int br_mrp_set_ring_role(struct net_bridge *br,
struct br_mrp_ring_role *role)
{
struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
enum br_mrp_hw_support support;
if (!mrp)
return -EINVAL;
mrp->ring_role = role->ring_role;
/* If there is an error just bail out */
support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
if (support == BR_MRP_NONE)
return -EOPNOTSUPP;
/* Now detect if the HW actually applied the role or not. If the HW
* applied the role it means that the SW doesn't need to do those operations
* anymore. For example if the role is MRM then the HW will notify the
* SW when the ring is open, but if the role is not pushed to the HW the SW
* will need to detect when the ring is open
*/
mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1;
return 0;
}
/* Start to generate or monitor MRP test frames, the frames are generated by
* HW and if it fails, they are generated by the SW.
* note: already called with rtnl_lock
*/
int br_mrp_start_test(struct net_bridge *br,
struct br_mrp_start_test *test)
{
struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
enum br_mrp_hw_support support;
if (!mrp)
return -EINVAL;
/* Try to push it to the HW and if it fails then continue with SW
* implementation and if that also fails then return error.
*/
support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
test->max_miss, test->period,
test->monitor);
if (support == BR_MRP_NONE)
return -EOPNOTSUPP;
if (support == BR_MRP_HW)
return 0;
mrp->test_interval = test->interval;
mrp->test_end = jiffies + usecs_to_jiffies(test->period);
mrp->test_max_miss = test->max_miss;
mrp->test_monitor = test->monitor;
mrp->test_count_miss = 0;
queue_delayed_work(system_wq, &mrp->test_work,
usecs_to_jiffies(test->interval));
return 0;
}
/* Set in state, in state can be only Open or Closed
* note: already called with rtnl_lock
*/
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);
if (!mrp)
return -EINVAL;
if (mrp->in_state != state->in_state)
mrp->in_transitions++;
mrp->in_state = state->in_state;
br_mrp_switchdev_set_in_state(br, mrp, state->in_state);
return 0;
}
/* Set in role, in role can be only MIM(Media Interconnection Manager) or
* MIC(Media Interconnection Client).
* note: already called with rtnl_lock
*/
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
enum br_mrp_hw_support support;
struct net_bridge_port *p;
if (!mrp)
return -EINVAL;
if (!br_mrp_get_port(br, role->i_ifindex))
return -EINVAL;
if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
u8 state;
/* It is not allowed to disable a port that doesn't exist */
p = rtnl_dereference(mrp->i_port);
if (!p)
return -EINVAL;
/* Stop generating MRP_InTest frames */
cancel_delayed_work_sync(&mrp->in_test_work);
br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
/* Remove the port */
spin_lock_bh(&br->lock);
state = netif_running(br->dev) ?
BR_STATE_FORWARDING : BR_STATE_DISABLED;
p->state = state;
p->flags &= ~BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
br_mrp_port_switchdev_set_state(p, state);
rcu_assign_pointer(mrp->i_port, NULL);
mrp->in_role = role->in_role;
mrp->in_id = 0;
return 0;
}
/* It is not possible to have the same port part of multiple rings */
if (!br_mrp_unique_ifindex(br, role->i_ifindex))
return -EINVAL;
/* It is not allowed to set a different interconnect port if the mrp
* instance already has one. First it needs to be disabled and only after
* that can the new port be set
*/
if (rcu_access_pointer(mrp->i_port))
return -EINVAL;
p = br_mrp_get_port(br, role->i_ifindex);
spin_lock_bh(&br->lock);
p->state = BR_STATE_FORWARDING;
p->flags |= BR_MRP_AWARE;
spin_unlock_bh(&br->lock);
rcu_assign_pointer(mrp->i_port, p);
mrp->in_role = role->in_role;
mrp->in_id = role->in_id;
/* If there is an error just bail out */
support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
role->ring_id, role->in_role);
if (support == BR_MRP_NONE)
return -EOPNOTSUPP;
/* Now detect if the HW actually applied the role or not. If the HW
* applied the role it means that the SW doesn't need to do those operations
* anymore. For example if the role is MIM then the HW will notify the
* SW when the interconnect ring is open, but if the role is not pushed to
* the HW the SW will need to detect when the interconnect ring is open.
*/
mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1;
return 0;
}
/* Start to generate MRP_InTest frames, the frames are generated by
* HW and if it fails, they are generated by the SW.
* note: already called with rtnl_lock
*/
int br_mrp_start_in_test(struct net_bridge *br,
struct br_mrp_start_in_test *in_test)
{
struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
enum br_mrp_hw_support support;
if (!mrp)
return -EINVAL;
if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
return -EINVAL;
/* Try to push it to the HW and if it fails then continue with SW
* implementation and if that also fails then return error.
*/
support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
in_test->max_miss,
in_test->period);
if (support == BR_MRP_NONE)
return -EOPNOTSUPP;
if (support == BR_MRP_HW)
return 0;
mrp->in_test_interval = in_test->interval;
mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
mrp->in_test_max_miss = in_test->max_miss;
mrp->in_test_count_miss = 0;
queue_delayed_work(system_wq, &mrp->in_test_work,
usecs_to_jiffies(in_test->interval));
return 0;
}
/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return false;
if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
hdr->type == BR_MRP_TLV_HEADER_OPTION)
return true;
return false;
}
/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return false;
if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
return true;
return false;
}
/* Process only MRP Test frames. All the other MRP frames are processed by
* the userspace application
* note: already called with rcu_read_lock
*/
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
struct sk_buff *skb)
{
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
/* Each MRP header starts with a version field which is 16 bits.
* Therefore skip the version and get directly the TLV header.
*/
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return;
if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
return;
mrp->test_count_miss = 0;
/* Notify the userspace that the ring is closed only when the ring was
* not already marked as closed, otherwise it would notify on every
* received MRP_Test frame
*/
if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
br_mrp_ring_port_open(port->dev, false);
}
/* Determine if the test hdr has a better priority than the node */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
struct net_bridge *br,
const struct br_mrp_ring_test_hdr *hdr)
{
u16 prio = be16_to_cpu(hdr->prio);
if (prio < mrp->prio ||
(prio == mrp->prio &&
ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
return true;
return false;
}
/* Process only MRP Test frames. All the other MRP frames are processed by
* the userspace application
* note: already called with rcu_read_lock
*/
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
{
const struct br_mrp_ring_test_hdr *test_hdr;
struct br_mrp_ring_test_hdr _test_hdr;
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
/* Each MRP header starts with a version field which is 16 bits.
* Therefore skip the version and get directly the TLV header.
*/
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return;
if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
return;
test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
sizeof(_test_hdr), &_test_hdr);
if (!test_hdr)
return;
/* Only frames that have a better priority than the node will
* clear the miss counter because otherwise the node will need to behave
* as MRM.
*/
if (br_mrp_test_better_than_own(mrp, br, test_hdr))
mrp->test_count_miss = 0;
}
/* Process only MRP InTest frames. All the other MRP frames are processed by
* the userspace application
* note: already called with rcu_read_lock
*/
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
struct sk_buff *skb)
{
const struct br_mrp_in_test_hdr *in_hdr;
struct br_mrp_in_test_hdr _in_hdr;
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
/* Each MRP header starts with a version field which is 16 bits.
* Therefore skip the version and get directly the TLV header.
*/
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return false;
/* The check for InTest frame type was already done */
in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
sizeof(_in_hdr), &_in_hdr);
if (!in_hdr)
return false;
/* It needs to process only its own InTest frames. */
if (mrp->in_id != ntohs(in_hdr->id))
return false;
mrp->in_test_count_miss = 0;
/* Notify the userspace that the interconnect ring is closed only when it
* was not already marked as closed, otherwise it would notify on every
* received MRP_InTest frame
*/
if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
br_mrp_in_port_open(port->dev, false);
return true;
}
/* Get the MRP frame type
* note: already called with rcu_read_lock
*/
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
const struct br_mrp_tlv_hdr *hdr;
struct br_mrp_tlv_hdr _hdr;
/* Each MRP header starts with a version field which is 16 bits.
* Therefore skip the version and get directly the TLV header.
*/
hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
if (!hdr)
return 0xff;
return hdr->type;
}
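/* The node behaves as MRM when its ring role is MRM, or when it is MRA and
 * is currently sending MRP_Test frames instead of only monitoring them.
 */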
static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
(mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
return true;
return false;
}
static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
(mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
return true;
return false;
}
/* This will just forward the frame to the other mrp ring ports, depending on
* the frame type, ring role and interconnect role
* note: already called with rcu_read_lock
*/
static int br_mrp_rcv(struct net_bridge_port *p,
struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge_port *p_port, *s_port, *i_port = NULL;
struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
struct net_bridge *br;
struct br_mrp *mrp;
/* If port is disabled don't accept any frames */
if (p->state == BR_STATE_DISABLED)
return 0;
br = p->br;
mrp = br_mrp_find_port(br, p);
if (unlikely(!mrp))
return 0;
p_port = rcu_dereference(mrp->p_port);
if (!p_port)
return 0;
p_dst = p_port;
s_port = rcu_dereference(mrp->s_port);
if (!s_port)
return 0;
s_dst = s_port;
/* If the frame is a ring frame then it is not required to check the
* interconnect role and ports to process or forward the frame
*/
if (br_mrp_ring_frame(skb)) {
/* If the role is MRM then don't forward the frames */
if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
br_mrp_mrm_process(mrp, p, skb);
goto no_forward;
}
/* If the role is MRA then don't forward the frames if it
* behaves as MRM node
*/
if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
if (!mrp->test_monitor) {
br_mrp_mrm_process(mrp, p, skb);
goto no_forward;
}
br_mrp_mra_process(mrp, br, p, skb);
}
goto forward;
}
if (br_mrp_in_frame(skb)) {
u8 in_type = br_mrp_get_frame_type(skb);
i_port = rcu_dereference(mrp->i_port);
i_dst = i_port;
/* If the ring port is in the blocking state it should not forward
* In_Test frames
*/
if (br_mrp_is_ring_port(p_port, s_port, p) &&
p->state == BR_STATE_BLOCKING &&
in_type == BR_MRP_TLV_HEADER_IN_TEST)
goto no_forward;
/* Nodes that behave as MRM need to stop forwarding the
* frames in case the ring is closed, otherwise there will be a loop.
* In this case the frame is not forwarded between the ring ports.
*/
if (br_mrp_mrm_behaviour(mrp) &&
br_mrp_is_ring_port(p_port, s_port, p) &&
(s_port->state != BR_STATE_FORWARDING ||
p_port->state != BR_STATE_FORWARDING)) {
p_dst = NULL;
s_dst = NULL;
}
/* A node that behaves as MRC and doesn't have an interconnect
* role should forward all frames between the ring ports
* because it doesn't have an interconnect port
*/
if (br_mrp_mrc_behaviour(mrp) &&
mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
goto forward;
if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
/* MIM should not forward its own InTest
* frames
*/
if (br_mrp_mim_process(mrp, p, skb)) {
goto no_forward;
} else {
if (br_mrp_is_ring_port(p_port, s_port,
p))
i_dst = NULL;
if (br_mrp_is_in_port(i_port, p))
goto no_forward;
}
} else {
/* MIM should forward IntLinkChange/Status and
* IntTopoChange between ring ports but MIM
* should not forward IntLinkChange/Status and
* IntTopoChange if the frame was received at
* the interconnect port
*/
if (br_mrp_is_ring_port(p_port, s_port, p))
i_dst = NULL;
if (br_mrp_is_in_port(i_port, p))
goto no_forward;
}
}
if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
/* MIC should forward InTest frames on all ports
* regardless of the received port
*/
if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
goto forward;
/* MIC should forward IntLinkChange frames to all the ports only
* if they were received on ring ports
*/
if (br_mrp_is_ring_port(p_port, s_port, p) &&
(in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
goto forward;
/* MIC should forward IntLinkStatus frames only to the
* interconnect port if they were received on a ring port.
* If they are received on the interconnect port then they
* should be forwarded on both ring ports
*/
if (br_mrp_is_ring_port(p_port, s_port, p) &&
in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
p_dst = NULL;
s_dst = NULL;
}
/* Should forward the InTopo frames only between the
* ring ports
*/
if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
i_dst = NULL;
goto forward;
}
/* In all the other cases don't forward the frames */
goto no_forward;
}
}
forward:
if (p_dst)
br_forward(p_dst, skb, true, false);
if (s_dst)
br_forward(s_dst, skb, true, false);
if (i_dst)
br_forward(i_dst, skb, true, false);
no_forward:
return 1;
}
/* Check if the frame was received on a port that is part of an MRP ring
* and if the frame has the MRP ethertype. In that case process the frame,
* otherwise do normal forwarding.
* note: already called with rcu_read_lock
*/
static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
/* If there is no MRP instance do normal forwarding */
if (likely(!(p->flags & BR_MRP_AWARE)))
goto out;
return br_mrp_rcv(p, skb, p->dev);
out:
return 0;
}
bool br_mrp_enabled(struct net_bridge *br)
{
return !hlist_empty(&br->mrp_list);
}
| linux-master | net/bridge/br_mrp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Spanning tree protocol; generic parts
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_stp.h"
/* since time values in bpdu are in jiffies and then scaled (1/256)
* before sending, make sure it is at least one STP tick.
*/
#define MESSAGE_AGE_INCR ((HZ / 256) + 1)
static const char *const br_port_state_names[] = {
[BR_STATE_DISABLED] = "disabled",
[BR_STATE_LISTENING] = "listening",
[BR_STATE_LEARNING] = "learning",
[BR_STATE_FORWARDING] = "forwarding",
[BR_STATE_BLOCKING] = "blocking",
};
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
.flags = SWITCHDEV_F_DEFER,
.u.stp_state = state,
};
int err;
/* Don't change the state of the ports if they are driven by a different
* protocol.
*/
if (p->flags & BR_MRP_AWARE)
return;
p->state = state;
if (br_opt_get(p->br, BROPT_MST_ENABLED)) {
err = br_mst_set_state(p, 0, state, NULL);
if (err)
br_warn(p->br, "error setting MST state on port %u(%s)\n",
p->port_no, netdev_name(p->dev));
}
err = switchdev_port_attr_set(p->dev, &attr, NULL);
if (err && err != -EOPNOTSUPP)
br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
else
br_info(p->br, "port %u(%s) entered %s state\n",
(unsigned int) p->port_no, p->dev->name,
br_port_state_names[p->state]);
if (p->br->stp_enabled == BR_KERNEL_STP) {
switch (p->state) {
case BR_STATE_BLOCKING:
p->stp_xstats.transition_blk++;
break;
case BR_STATE_FORWARDING:
p->stp_xstats.transition_fwd++;
break;
}
}
}
u8 br_port_get_stp_state(const struct net_device *dev)
{
struct net_bridge_port *p;
ASSERT_RTNL();
p = br_port_get_rtnl(dev);
if (!p)
return BR_STATE_DISABLED;
return p->state;
}
EXPORT_SYMBOL_GPL(br_port_get_stp_state);
/* called under bridge lock */
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
struct net_bridge_port *p;
list_for_each_entry_rcu(p, &br->port_list, list,
lockdep_is_held(&br->lock)) {
if (p->port_no == port_no)
return p;
}
return NULL;
}
/* called under bridge lock */
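/* Decide whether p should replace the current root port, comparing in the
 * standard STP order: designated root, root path cost through the port,
 * designated bridge, designated port and finally port id.
 */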
static int br_should_become_root_port(const struct net_bridge_port *p,
u16 root_port)
{
struct net_bridge *br;
struct net_bridge_port *rp;
int t;
br = p->br;
if (p->state == BR_STATE_DISABLED ||
br_is_designated_port(p))
return 0;
if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0)
return 0;
if (!root_port)
return 1;
rp = br_get_port(br, root_port);
t = memcmp(&p->designated_root, &rp->designated_root, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->designated_cost + p->path_cost <
rp->designated_cost + rp->path_cost)
return 1;
else if (p->designated_cost + p->path_cost >
rp->designated_cost + rp->path_cost)
return 0;
t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->designated_port < rp->designated_port)
return 1;
else if (p->designated_port > rp->designated_port)
return 0;
if (p->port_id < rp->port_id)
return 1;
return 0;
}
static void br_root_port_block(const struct net_bridge *br,
struct net_bridge_port *p)
{
br_notice(br, "port %u(%s) tried to become root port (blocked)",
(unsigned int) p->port_no, p->dev->name);
br_set_state(p, BR_STATE_LISTENING);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
if (br->forward_delay > 0)
mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}
/* called under bridge lock */
static void br_root_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
u16 root_port = 0;
list_for_each_entry(p, &br->port_list, list) {
if (!br_should_become_root_port(p, root_port))
continue;
if (p->flags & BR_ROOT_BLOCK)
br_root_port_block(br, p);
else
root_port = p->port_no;
}
br->root_port = root_port;
if (!root_port) {
br->designated_root = br->bridge_id;
br->root_path_cost = 0;
} else {
p = br_get_port(br, root_port);
br->designated_root = p->designated_root;
br->root_path_cost = p->designated_cost + p->path_cost;
}
}
/* called under bridge lock */
void br_become_root_bridge(struct net_bridge *br)
{
br->max_age = br->bridge_max_age;
br->hello_time = br->bridge_hello_time;
br->forward_delay = br->bridge_forward_delay;
br_topology_change_detection(br);
del_timer(&br->tcn_timer);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
mod_timer(&br->hello_timer, jiffies + br->hello_time);
}
}
/* called under bridge lock */
void br_transmit_config(struct net_bridge_port *p)
{
struct br_config_bpdu bpdu;
struct net_bridge *br;
if (timer_pending(&p->hold_timer)) {
p->config_pending = 1;
return;
}
br = p->br;
bpdu.topology_change = br->topology_change;
bpdu.topology_change_ack = p->topology_change_ack;
bpdu.root = br->designated_root;
bpdu.root_path_cost = br->root_path_cost;
bpdu.bridge_id = br->bridge_id;
bpdu.port_id = p->port_id;
if (br_is_root_bridge(br))
bpdu.message_age = 0;
else {
struct net_bridge_port *root
= br_get_port(br, br->root_port);
bpdu.message_age = (jiffies - root->designated_age)
+ MESSAGE_AGE_INCR;
}
bpdu.max_age = br->max_age;
bpdu.hello_time = br->hello_time;
bpdu.forward_delay = br->forward_delay;
if (bpdu.message_age < br->max_age) {
br_send_config_bpdu(p, &bpdu);
p->topology_change_ack = 0;
p->config_pending = 0;
if (p->br->stp_enabled == BR_KERNEL_STP)
mod_timer(&p->hold_timer,
round_jiffies(jiffies + BR_HOLD_TIME));
}
}
/* called under bridge lock */
static void br_record_config_information(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
p->designated_root = bpdu->root;
p->designated_cost = bpdu->root_path_cost;
p->designated_bridge = bpdu->bridge_id;
p->designated_port = bpdu->port_id;
p->designated_age = jiffies - bpdu->message_age;
mod_timer(&p->message_age_timer, jiffies
+ (bpdu->max_age - bpdu->message_age));
}
/* called under bridge lock */
static void br_record_config_timeout_values(struct net_bridge *br,
const struct br_config_bpdu *bpdu)
{
br->max_age = bpdu->max_age;
br->hello_time = bpdu->hello_time;
br->forward_delay = bpdu->forward_delay;
__br_set_topology_change(br, bpdu->topology_change);
}
/* called under bridge lock */
void br_transmit_tcn(struct net_bridge *br)
{
struct net_bridge_port *p;
p = br_get_port(br, br->root_port);
if (p)
br_send_tcn_bpdu(p);
else
br_notice(br, "root port %u not found for topology notice\n",
br->root_port);
}
/* called under bridge lock */
static int br_should_become_designated_port(const struct net_bridge_port *p)
{
struct net_bridge *br;
int t;
br = p->br;
if (br_is_designated_port(p))
return 1;
if (memcmp(&p->designated_root, &br->designated_root, 8))
return 1;
if (br->root_path_cost < p->designated_cost)
return 1;
else if (br->root_path_cost > p->designated_cost)
return 0;
t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->port_id < p->designated_port)
return 1;
return 0;
}
/* called under bridge lock */
static void br_designated_port_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_should_become_designated_port(p))
br_become_designated_port(p);
}
}
/* called under bridge lock */
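/* Return 1 if the information carried by the received config BPDU is better
 * than (or refreshes) the information currently recorded for this port.
 */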
static int br_supersedes_port_info(const struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
int t;
t = memcmp(&bpdu->root, &p->designated_root, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (bpdu->root_path_cost < p->designated_cost)
return 1;
else if (bpdu->root_path_cost > p->designated_cost)
return 0;
t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8))
return 1;
if (bpdu->port_id <= p->designated_port)
return 1;
return 0;
}
/* called under bridge lock */
static void br_topology_change_acknowledged(struct net_bridge *br)
{
br->topology_change_detected = 0;
del_timer(&br->tcn_timer);
}
/* called under bridge lock */
void br_topology_change_detection(struct net_bridge *br)
{
int isroot = br_is_root_bridge(br);
if (br->stp_enabled != BR_KERNEL_STP)
return;
br_info(br, "topology change detected, %s\n",
isroot ? "propagating" : "sending tcn bpdu");
if (isroot) {
__br_set_topology_change(br, 1);
mod_timer(&br->topology_change_timer, jiffies
+ br->bridge_forward_delay + br->bridge_max_age);
} else if (!br->topology_change_detected) {
br_transmit_tcn(br);
mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
}
br->topology_change_detected = 1;
}
/* called under bridge lock */
void br_config_bpdu_generation(struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_is_designated_port(p))
br_transmit_config(p);
}
}
/* called under bridge lock */
static void br_reply(struct net_bridge_port *p)
{
br_transmit_config(p);
}
/* called under bridge lock */
void br_configuration_update(struct net_bridge *br)
{
br_root_selection(br);
br_designated_port_selection(br);
}
/* called under bridge lock */
void br_become_designated_port(struct net_bridge_port *p)
{
struct net_bridge *br;
br = p->br;
p->designated_root = br->designated_root;
p->designated_cost = br->root_path_cost;
p->designated_bridge = br->bridge_id;
p->designated_port = p->port_id;
}
/* called under bridge lock */
static void br_make_blocking(struct net_bridge_port *p)
{
if (p->state != BR_STATE_DISABLED &&
p->state != BR_STATE_BLOCKING) {
if (p->state == BR_STATE_FORWARDING ||
p->state == BR_STATE_LEARNING)
br_topology_change_detection(p->br);
br_set_state(p, BR_STATE_BLOCKING);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
del_timer(&p->forward_delay_timer);
}
}
/* called under bridge lock */
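/* Start moving a blocked port towards forwarding. With STP disabled or
* a zero forward delay the port jumps straight to FORWARDING; with
* kernel STP it enters LISTENING, otherwise (user-space STP) LEARNING,
* and the forward delay timer is armed to advance it later.
*/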
static void br_make_forwarding(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
if (p->state != BR_STATE_BLOCKING)
return;
if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
br_set_state(p, BR_STATE_FORWARDING);
br_topology_change_detection(br);
del_timer(&p->forward_delay_timer);
} else if (br->stp_enabled == BR_KERNEL_STP)
br_set_state(p, BR_STATE_LISTENING);
else
br_set_state(p, BR_STATE_LEARNING);
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
if (br->forward_delay != 0)
mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}
/* called under bridge lock */
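/* Recompute the state of every enabled port after a configuration
* update: unless user-space STP is in control, the root port and
* designated ports are moved towards forwarding and all other ports are
* blocked. The bridge device's carrier tracks whether any port forwards.
*/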
void br_port_state_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
unsigned int liveports = 0;
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
/* Don't change port states if userspace is handling STP */
if (br->stp_enabled != BR_USER_STP) {
if (p->port_no == br->root_port) {
p->config_pending = 0;
p->topology_change_ack = 0;
br_make_forwarding(p);
} else if (br_is_designated_port(p)) {
del_timer(&p->message_age_timer);
br_make_forwarding(p);
} else {
p->config_pending = 0;
p->topology_change_ack = 0;
br_make_blocking(p);
}
}
if (p->state != BR_STATE_BLOCKING)
br_multicast_enable_port(p);
/* Multicast is not disabled for the port when it goes into the
* blocking state because the timers will expire and stop by
* themselves without sending more queries.
*/
if (p->state == BR_STATE_FORWARDING)
++liveports;
}
if (liveports == 0)
netif_carrier_off(br->dev);
else
netif_carrier_on(br->dev);
}
/* called under bridge lock */
static void br_topology_change_acknowledge(struct net_bridge_port *p)
{
p->topology_change_ack = 1;
br_transmit_config(p);
}
/* called under bridge lock */
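/* Handle a received config BPDU: superior information is recorded and
* triggers root/designated reselection and port state updates, while an
* inferior BPDU arriving on a designated port is answered with our own
* configuration (br_reply).
*/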
void br_received_config_bpdu(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
struct net_bridge *br;
int was_root;
p->stp_xstats.rx_bpdu++;
br = p->br;
was_root = br_is_root_bridge(br);
if (br_supersedes_port_info(p, bpdu)) {
br_record_config_information(p, bpdu);
br_configuration_update(br);
br_port_state_selection(br);
if (!br_is_root_bridge(br) && was_root) {
del_timer(&br->hello_timer);
if (br->topology_change_detected) {
del_timer(&br->topology_change_timer);
br_transmit_tcn(br);
mod_timer(&br->tcn_timer,
jiffies + br->bridge_hello_time);
}
}
if (p->port_no == br->root_port) {
br_record_config_timeout_values(br, bpdu);
br_config_bpdu_generation(br);
if (bpdu->topology_change_ack)
br_topology_change_acknowledged(br);
}
} else if (br_is_designated_port(p)) {
br_reply(p);
}
}
/* called under bridge lock */
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
p->stp_xstats.rx_tcn++;
if (br_is_designated_port(p)) {
br_info(p->br, "port %u(%s) received tcn bpdu\n",
(unsigned int) p->port_no, p->dev->name);
br_topology_change_detection(p->br);
br_topology_change_acknowledge(p);
}
}
/* Change bridge STP parameter */
int br_set_hello_time(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
return -ERANGE;
spin_lock_bh(&br->lock);
br->bridge_hello_time = t;
if (br_is_root_bridge(br))
br->hello_time = br->bridge_hello_time;
spin_unlock_bh(&br->lock);
return 0;
}
int br_set_max_age(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
return -ERANGE;
spin_lock_bh(&br->lock);
br->bridge_max_age = t;
if (br_is_root_bridge(br))
br->max_age = br->bridge_max_age;
spin_unlock_bh(&br->lock);
return 0;
}
/* called under bridge lock */
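/* Offload the FDB ageing time to switchdev. Drivers that do not
* implement the attribute (-EOPNOTSUPP) are ignored so that a pure
* software bridge keeps working.
*/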
int __set_ageing_time(struct net_device *dev, unsigned long t)
{
struct switchdev_attr attr = {
.orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
.u.ageing_time = jiffies_to_clock_t(t),
};
int err;
err = switchdev_port_attr_set(dev, &attr, NULL);
if (err && err != -EOPNOTSUPP)
return err;
return 0;
}
/* Set the time interval that dynamic forwarding entries live.
* For a pure software bridge, allow values outside the 802.1
* standard specification for special cases:
* 0 - entry never ages (all permanent)
* 1 - entry disappears (no persistence)
*
* Offloaded switch entries may be more restrictive.
*/
int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
{
unsigned long t = clock_t_to_jiffies(ageing_time);
int err;
err = __set_ageing_time(br->dev, t);
if (err)
return err;
spin_lock_bh(&br->lock);
br->bridge_ageing_time = t;
br->ageing_time = t;
spin_unlock_bh(&br->lock);
mod_delayed_work(system_long_wq, &br->gc_work, 0);
return 0;
}
clock_t br_get_ageing_time(const struct net_device *br_dev)
{
const struct net_bridge *br;
if (!netif_is_bridge_master(br_dev))
return 0;
br = netdev_priv(br_dev);
return jiffies_to_clock_t(br->ageing_time);
}
EXPORT_SYMBOL_GPL(br_get_ageing_time);
/* called under bridge lock */
void __br_set_topology_change(struct net_bridge *br, unsigned char val)
{
unsigned long t;
int err;
if (br->stp_enabled == BR_KERNEL_STP && br->topology_change != val) {
/* On topology change, set the bridge ageing time to twice the
* forward delay. Otherwise, restore its default ageing time.
*/
if (val) {
t = 2 * br->forward_delay;
br_debug(br, "decreasing ageing time to %lu\n", t);
} else {
t = br->bridge_ageing_time;
br_debug(br, "restoring ageing time to %lu\n", t);
}
err = __set_ageing_time(br->dev, t);
if (err)
br_warn(br, "error offloading ageing time\n");
else
br->ageing_time = t;
}
br->topology_change = val;
}
void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
{
br->bridge_forward_delay = t;
if (br_is_root_bridge(br))
br->forward_delay = br->bridge_forward_delay;
}
int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
int err = -ERANGE;
spin_lock_bh(&br->lock);
if (br->stp_enabled != BR_NO_STP &&
(t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
goto unlock;
__br_set_forward_delay(br, t);
err = 0;
unlock:
spin_unlock_bh(&br->lock);
return err;
}
| linux-master | net/bridge/br_stp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle firewalling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
* Bart De Schuymer <[email protected]>
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
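/* Sanity-check the IPv6 header of a bridged packet and trim the skb to
* the declared payload length. Returns 0 on success and -1 for
* malformed packets, bumping IPSTATS counters where applicable.
*/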
int br_validate_ipv6(struct net *net, struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
struct inet6_dev *idev = __in6_dev_get(skb->dev);
u32 pkt_len;
u8 ip6h_len = sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, ip6h_len))
goto inhdr_error;
if (skb->len < ip6h_len)
goto drop;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
goto inhdr_error;
pkt_len = ntohs(hdr->payload_len);
if (hdr->nexthdr == NEXTHDR_HOP && nf_ip6_check_hbh_len(skb, &pkt_len))
goto drop;
if (pkt_len + ip6h_len > skb->len) {
__IP6_INC_STATS(net, idev,
IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
__IP6_INC_STATS(net, idev,
IPSTATS_MIB_INDISCARDS);
goto drop;
}
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
/* There are no IP options in the IPv6 header itself; however, it
* should still be checked whether some next headers need special
* treatment.
*/
return 0;
inhdr_error:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
static inline bool
br_nf_ipv6_daddr_was_changed(const struct sk_buff *skb,
const struct nf_bridge_info *nf_bridge)
{
return memcmp(&nf_bridge->ipv6_daddr, &ipv6_hdr(skb)->daddr,
sizeof(ipv6_hdr(skb)->daddr)) != 0;
}
/* PF_BRIDGE/PRE_ROUTING: Undo the changes made for ip6tables
* PREROUTING and continue the bridge PRE_ROUTING hook. See comment
* for br_nf_pre_routing_finish(), same logic is used here but
* equivalent IPv6 function ip6_route_input() called indirectly.
*/
static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
struct net_device *dev = skb->dev;
const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
nf_bridge->in_prerouting = 0;
if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
skb_dst_drop(skb);
v6ops->route_input(skb);
if (skb_dst(skb)->error) {
kfree_skb(skb);
return 0;
}
if (skb_dst(skb)->dev == dev) {
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING,
net, sk, skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge);
return 0;
}
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb->pkt_type = PACKET_HOST;
} else {
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
skb->dev, NULL, br_handle_frame_finish);
return 0;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
* to ip6tables.
*/
unsigned int br_nf_pre_routing_ipv6(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge;
if (br_validate_ipv6(state->net, skb))
return NF_DROP;
nf_bridge = nf_bridge_alloc(skb);
if (!nf_bridge)
return NF_DROP;
if (!setup_pre_routing(skb, state->net))
return NF_DROP;
nf_bridge = nf_bridge_info_get(skb);
nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IPV6);
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
}
| linux-master | net/bridge/br_netfilter_ipv6.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic parts
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/llc.h>
#include <net/llc.h>
#include <net/stp.h>
#include <net/switchdev.h>
#include "br_private.h"
/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
* port state is checked when bridge is brought up.
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
bool notified = false;
bool changed_addr;
int err;
if (netif_is_bridge_master(dev)) {
err = br_vlan_bridge_event(dev, event, ptr);
if (err)
return notifier_from_errno(err);
if (event == NETDEV_REGISTER) {
/* register of bridge completed, add sysfs entries */
err = br_sysfs_addbr(dev);
if (err)
return notifier_from_errno(err);
return NOTIFY_DONE;
}
}
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
return NOTIFY_DONE;
br = p->br;
switch (event) {
case NETDEV_CHANGEMTU:
br_mtu_auto_adjust(br);
break;
case NETDEV_PRE_CHANGEADDR:
if (br->dev->addr_assign_type == NET_ADDR_SET)
break;
prechaddr_info = ptr;
err = dev_pre_changeaddr_notify(br->dev,
prechaddr_info->dev_addr,
extack);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
break;
case NETDEV_CHANGE:
br_port_carrier_check(p, &notified);
break;
case NETDEV_FEAT_CHANGE:
netdev_update_features(br->dev);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP) {
br_stp_disable_port(p);
notified = true;
}
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
notified = true;
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
case NETDEV_CHANGENAME:
err = br_sysfs_renameif(p);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
if (event != NETDEV_UNREGISTER)
br_vlan_port_event(p, event);
/* Events that may cause spanning tree to refresh */
if (!notified && (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
event == NETDEV_CHANGE || event == NETDEV_DOWN))
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
return NOTIFY_DONE;
}
static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
/* called with RTNL or RCU */
static int br_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
struct switchdev_notifier_fdb_info *fdb_info;
int err = NOTIFY_DONE;
p = br_port_get_rtnl_rcu(dev);
if (!p)
goto out;
br = p->br;
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_add(br, p, fdb_info->addr,
fdb_info->vid,
fdb_info->locked, false);
if (err) {
err = notifier_from_errno(err);
break;
}
br_fdb_offloaded_set(br, p, fdb_info->addr,
fdb_info->vid, fdb_info->offloaded);
break;
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_del(br, p, fdb_info->addr,
fdb_info->vid, false);
if (err)
err = notifier_from_errno(err);
break;
case SWITCHDEV_FDB_OFFLOADED:
fdb_info = ptr;
br_fdb_offloaded_set(br, p, fdb_info->addr,
fdb_info->vid, fdb_info->offloaded);
break;
case SWITCHDEV_FDB_FLUSH_TO_BRIDGE:
fdb_info = ptr;
/* Don't delete static entries */
br_fdb_delete_by_port(br, p, fdb_info->vid, 0);
break;
}
out:
return err;
}
static struct notifier_block br_switchdev_notifier = {
.notifier_call = br_switchdev_event,
};
/* called under rtnl_mutex */
static int br_switchdev_blocking_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_brport_info *brport_info;
const struct switchdev_brport *b;
struct net_bridge_port *p;
int err = NOTIFY_DONE;
p = br_port_get_rtnl(dev);
if (!p)
goto out;
switch (event) {
case SWITCHDEV_BRPORT_OFFLOADED:
brport_info = ptr;
b = &brport_info->brport;
err = br_switchdev_port_offload(p, b->dev, b->ctx,
b->atomic_nb, b->blocking_nb,
b->tx_fwd_offload, extack);
err = notifier_from_errno(err);
break;
case SWITCHDEV_BRPORT_UNOFFLOADED:
brport_info = ptr;
b = &brport_info->brport;
br_switchdev_port_unoffload(p, b->ctx, b->atomic_nb,
b->blocking_nb);
break;
case SWITCHDEV_BRPORT_REPLAY:
brport_info = ptr;
b = &brport_info->brport;
err = br_switchdev_port_replay(p, b->dev, b->ctx, b->atomic_nb,
b->blocking_nb, extack);
err = notifier_from_errno(err);
break;
}
out:
return err;
}
static struct notifier_block br_switchdev_blocking_notifier = {
.notifier_call = br_switchdev_blocking_event,
};
/* br_boolopt_toggle - change user-controlled boolean option
*
* @br: bridge device
* @opt: id of the option to change
* @on: new option value
* @extack: extack for error messages
*
* Changes the value of the respective boolean option to @on taking care of
* any internal option value mapping and configuration.
*/
int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
struct netlink_ext_ack *extack)
{
int err = 0;
switch (opt) {
case BR_BOOLOPT_NO_LL_LEARN:
br_opt_toggle(br, BROPT_NO_LL_LEARN, on);
break;
case BR_BOOLOPT_MCAST_VLAN_SNOOPING:
err = br_multicast_toggle_vlan_snooping(br, on, extack);
break;
case BR_BOOLOPT_MST_ENABLE:
err = br_mst_set_enabled(br, on, extack);
break;
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
break;
}
return err;
}
int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt)
{
switch (opt) {
case BR_BOOLOPT_NO_LL_LEARN:
return br_opt_get(br, BROPT_NO_LL_LEARN);
case BR_BOOLOPT_MCAST_VLAN_SNOOPING:
return br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED);
case BR_BOOLOPT_MST_ENABLE:
return br_opt_get(br, BROPT_MST_ENABLED);
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
break;
}
return 0;
}
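/* Toggle every boolean option whose bit is set in bm->optmask to the
* corresponding value in bm->optval. Processing stops at the first
* failing option; options toggled before the failure stay applied.
*/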
int br_boolopt_multi_toggle(struct net_bridge *br,
struct br_boolopt_multi *bm,
struct netlink_ext_ack *extack)
{
unsigned long bitmap = bm->optmask;
int err = 0;
int opt_id;
for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
bool on = !!(bm->optval & BIT(opt_id));
err = br_boolopt_toggle(br, opt_id, on, extack);
if (err) {
br_debug(br, "boolopt multi-toggle error: option: %d current: %d new: %d error: %d\n",
opt_id, br_boolopt_get(br, opt_id), on, err);
break;
}
}
return err;
}
void br_boolopt_multi_get(const struct net_bridge *br,
struct br_boolopt_multi *bm)
{
u32 optval = 0;
int opt_id;
for (opt_id = 0; opt_id < BR_BOOLOPT_MAX; opt_id++)
optval |= (br_boolopt_get(br, opt_id) << opt_id);
bm->optval = optval;
bm->optmask = GENMASK((BR_BOOLOPT_MAX - 1), 0);
}
/* private bridge options, controlled by the kernel */
void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
{
bool cur = !!br_opt_get(br, opt);
br_debug(br, "toggle option: %d state: %d -> %d\n",
opt, cur, on);
if (cur == on)
return;
if (on)
set_bit(opt, &br->options);
else
clear_bit(opt, &br->options);
}
static void __net_exit br_net_exit_batch(struct list_head *net_list)
{
struct net_device *dev;
struct net *net;
LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
br_dev_delete(dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations br_net_ops = {
.exit_batch = br_net_exit_batch,
};
static const struct stp_proto br_stp_proto = {
.rcv = br_stp_rcv,
};
static int __init br_init(void)
{
int err;
BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > sizeof_field(struct sk_buff, cb));
err = stp_proto_register(&br_stp_proto);
if (err < 0) {
pr_err("bridge: can't register sap for STP\n");
return err;
}
err = br_fdb_init();
if (err)
goto err_out;
err = register_pernet_subsys(&br_net_ops);
if (err)
goto err_out1;
err = br_nf_core_init();
if (err)
goto err_out2;
err = register_netdevice_notifier(&br_device_notifier);
if (err)
goto err_out3;
err = register_switchdev_notifier(&br_switchdev_notifier);
if (err)
goto err_out4;
err = register_switchdev_blocking_notifier(&br_switchdev_blocking_notifier);
if (err)
goto err_out5;
err = br_netlink_init();
if (err)
goto err_out6;
brioctl_set(br_ioctl_stub);
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = br_fdb_test_addr;
#endif
#if IS_MODULE(CONFIG_BRIDGE_NETFILTER)
pr_info("bridge: filtering via arp/ip/ip6tables is no longer available "
"by default. Update your scripts to load br_netfilter if you "
"need this.\n");
#endif
return 0;
err_out6:
unregister_switchdev_blocking_notifier(&br_switchdev_blocking_notifier);
err_out5:
unregister_switchdev_notifier(&br_switchdev_notifier);
err_out4:
unregister_netdevice_notifier(&br_device_notifier);
err_out3:
br_nf_core_fini();
err_out2:
unregister_pernet_subsys(&br_net_ops);
err_out1:
br_fdb_fini();
err_out:
stp_proto_unregister(&br_stp_proto);
return err;
}
static void __exit br_deinit(void)
{
stp_proto_unregister(&br_stp_proto);
br_netlink_fini();
unregister_switchdev_blocking_notifier(&br_switchdev_blocking_notifier);
unregister_switchdev_notifier(&br_switchdev_notifier);
unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL);
unregister_pernet_subsys(&br_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
br_nf_core_fini();
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = NULL;
#endif
br_fdb_fini();
}
module_init(br_init)
module_exit(br_deinit)
MODULE_LICENSE("GPL");
MODULE_VERSION(BR_VERSION);
MODULE_ALIAS_RTNL_LINK("bridge");
| linux-master | net/bridge/br.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle firewalling core
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
* Bart De Schuymer <[email protected]>
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <net/route.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
}
static void fake_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb)
{
}
static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
{
return NULL;
}
static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
return NULL;
}
static unsigned int fake_mtu(const struct dst_entry *dst)
{
return dst->dev->mtu;
}
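/* Stub dst_ops backing the bridge's fake rtable: every callback is a
* no-op except mtu(), which simply reports the underlying device MTU.
*/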
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.update_pmtu = fake_update_pmtu,
.redirect = fake_redirect,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
.mtu = fake_mtu,
};
/*
* Initialize bogus route table used to keep netfilter happy.
* Currently, we fill in the PMTU entry because netfilter
* refragmentation needs it, and the rt_flags entry because
* ipt_REJECT needs it. Future netfilter modules might
* require us to fill additional fields.
*/
static const u32 br_dst_default_metrics[RTAX_MAX] = {
[RTAX_MTU - 1] = 1500,
};
void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
rcuref_init(&rt->dst.__rcuref, 1);
rt->dst.dev = br->dev;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
}
int __init br_nf_core_init(void)
{
return dst_entries_init(&fake_dst_ops);
}
void br_nf_core_fini(void)
{
dst_entries_destroy(&fake_dst_ops);
}
| linux-master | net/bridge/br_nf_core.c |