// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/sch_htb.c Hierarchical token bucket, feed tree version
*
* Authors: Martin Devera, <[email protected]>
*
* Credits (in time order) for older HTB versions:
* Stef Coene <[email protected]>
* HTB support at LARTC mailing list
* Ondrej Kraus, <[email protected]>
* found missing INIT_QDISC(htb)
* Vladimir Smelhaus, Aamer Akhter, Bert Hubert
* helped a lot to locate nasty class stall bug
* Andi Kleen, Jamal Hadi, Bert Hubert
* code review and helpful comments on shaping
* Tomasz Wrona, <[email protected]>
* created test case so that I was able to fix nasty bug
* Wilfried Weissmann
* spotted bug in dequeue code and helped with fix
* Jiri Fojtasek
* fixed requeue routine
* and many others. thanks.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* HTB algorithm.
Author: [email protected]
========================================================================
HTB is like TBF with multiple classes. It is also similar to CBQ because
it allows assigning a priority to each class in the hierarchy.
In fact it is another implementation of Floyd's formal sharing.
Levels:
Each class is assigned a level. Leaves are always at level 0 and root
classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
one less than their parent.
*/
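/* Illustrative sketch (not from this file; handles and rates are made up):
 * the level rules above, for a two-level hierarchy built with iproute2 tc:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 70mbit ceil 100mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0), 1:1 is their interior parent,
 * and the leaves may borrow up to "ceil" when 1:1 has spare rate.
 */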
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* Module parameter and sysfs export */
module_param (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
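/* Example (assuming the standard module-parameter sysfs layout): with
 * mode 0640 both knobs are root-writable at runtime, e.g.
 *
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 *
 * or can be set at load time with "modprobe sch_htb htb_hysteresis=1".
 */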
/* used internally to keep status of a single class */
enum htb_cmode {
HTB_CANT_SEND, /* class can't send and can't borrow */
HTB_MAY_BORROW, /* class can't send but may borrow */
HTB_CAN_SEND /* class can send */
};
struct htb_prio {
union {
struct rb_root row;
struct rb_root feed;
};
struct rb_node *ptr;
/* When class changes from state 1->2 and disconnects from
* parent's feed then we lose the ptr value and start from the
* first child again. Here we store classid of the
* last valid ptr (used when ptr is NULL).
*/
u32 last_ptr_id;
};
/* interior & leaf nodes; props specific to leaves are marked L:
* To reduce false sharing, place mostly read fields at beginning,
* and mostly written ones at the end.
*/
struct htb_class {
struct Qdisc_class_common common;
struct psched_ratecfg rate;
struct psched_ratecfg ceil;
s64 buffer, cbuffer;/* token bucket depth/rate */
s64 mbuffer; /* max wait time */
u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto __rcu *filter_list; /* class attached filters */
struct tcf_block *block;
int level; /* our level (see above) */
unsigned int children;
struct htb_class *parent; /* parent class */
struct net_rate_estimator __rcu *rate_est;
/*
* Written often fields
*/
struct gnet_stats_basic_sync bstats;
struct gnet_stats_basic_sync bstats_bias;
struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */
s64 tokens, ctokens;/* current number of tokens */
s64 t_c; /* checkpoint time */
union {
struct htb_class_leaf {
int deficit[TC_HTB_MAXDEPTH];
struct Qdisc *q;
struct netdev_queue *offload_queue;
} leaf;
struct htb_class_inner {
struct htb_prio clprio[TC_HTB_NUMPRIO];
} inner;
};
s64 pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
struct rb_node pq_node; /* node for event queue */
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
unsigned int drops ____cacheline_aligned_in_smp;
unsigned int overlimits;
};
struct htb_level {
struct rb_root wait_pq;
struct htb_prio hprio[TC_HTB_NUMPRIO];
};
struct htb_sched {
struct Qdisc_class_hash clhash;
int defcls; /* class where unclassified flows go to */
int rate2quantum; /* quant = rate / rate2quantum */
/* filters for qdisc itself */
struct tcf_proto __rcu *filter_list;
struct tcf_block *block;
#define HTB_WARN_TOOMANYEVENTS 0x1
unsigned int warned; /* only one warning */
int direct_qlen;
struct work_struct work;
/* non shaped skbs; let them go directly thru */
struct qdisc_skb_head direct_queue;
u32 direct_pkts;
u32 overlimits;
struct qdisc_watchdog watchdog;
s64 now; /* cached dequeue time */
/* time of nearest event per level (row) */
s64 near_ev_cache[TC_HTB_MAXDEPTH];
int row_mask[TC_HTB_MAXDEPTH];
struct htb_level hlevel[TC_HTB_MAXDEPTH];
struct Qdisc **direct_qdiscs;
unsigned int num_direct_qdiscs;
bool offload;
};
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct Qdisc_class_common *clc;
clc = qdisc_class_find(&q->clhash, handle);
if (clc == NULL)
return NULL;
return container_of(clc, struct htb_class, common);
}
static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
return (unsigned long)htb_find(handle, sch);
}
#define HTB_DIRECT ((struct htb_class *)-1L)
/**
* htb_classify - classify a packet into class
* @skb: the socket buffer
* @sch: the active queue discipline
* @qerr: pointer for returned status code
*
* It returns NULL if the packet should be dropped or HTB_DIRECT (-1) if
* the packet should be passed directly through. In all other cases a leaf
* class is returned. We allow direct class selection by classid in
* skb->priority. Then we examine filters in the qdisc and in inner nodes
* (if a higher-level filter points to an inner node). If we end up with
* classid MAJOR:0 we enqueue the skb into the special internal fifo
* (direct). These packets then go directly through. If we still have no
* valid leaf we try to use the MAJOR:default leaf. If that is still
* unsuccessful, we finish and return the direct queue.
*/
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
int result;
/* allow to select class by setting skb->priority to valid classid;
* note that nfmark can be used too by attaching filter fw with no
* rules in it
*/
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
cl = htb_find(skb->priority, sch);
if (cl) {
if (cl->level == 0)
return cl;
/* Start with inner filter chain if a non-leaf class is selected */
tcf = rcu_dereference_bh(cl->filter_list);
} else {
tcf = rcu_dereference_bh(q->filter_list);
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
fallthrough;
case TC_ACT_SHOT:
return NULL;
}
#endif
cl = (void *)res.class;
if (!cl) {
if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */
cl = htb_find(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */
}
if (!cl->level)
return cl; /* we hit leaf; return it */
/* we have got inner class; apply inner filter chain */
tcf = rcu_dereference_bh(cl->filter_list);
}
/* classification failed; try to use default class */
cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
if (!cl || cl->level)
return HTB_DIRECT; /* bad default .. this is safe bet */
return cl;
}
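/* Usage sketch (not from this file): direct class selection through
 * skb->priority can be driven from userspace with SO_PRIORITY, since
 * sk->sk_priority is copied into skb->priority. For a qdisc with handle 1:,
 *
 *   int prio = 0x00010010;	// classid 1:10
 *   setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 *
 * (values above 6 need CAP_NET_ADMIN). A value of 0x00010000 (classid 1:0,
 * the qdisc handle itself) selects the direct queue, per the check at the
 * top of htb_classify().
 */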
/**
* htb_add_to_id_tree - adds class to the round robin list
* @root: the root of the tree
* @cl: the class to add
* @prio: the given prio in class
*
* Routine adds the class to the list (actually a tree) sorted by classid.
* Make sure that the class is not already on such a list for the given prio.
*/
static void htb_add_to_id_tree(struct rb_root *root,
struct htb_class *cl, int prio)
{
struct rb_node **p = &root->rb_node, *parent = NULL;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, node[prio]);
if (cl->common.classid > c->common.classid)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&cl->node[prio], parent, p);
rb_insert_color(&cl->node[prio], root);
}
/**
* htb_add_to_wait_tree - adds class to the event queue with delay
* @q: the priority event queue
* @cl: the class to add
* @delay: delay in microseconds
*
* The class is added to priority event queue to indicate that class will
* change its mode in cl->pq_key microseconds. Make sure that class is not
* already in the queue.
*/
static void htb_add_to_wait_tree(struct htb_sched *q,
struct htb_class *cl, s64 delay)
{
struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
cl->pq_key = q->now + delay;
if (cl->pq_key == q->now)
cl->pq_key++;
/* update the nearest event cache */
if (q->near_ev_cache[cl->level] > cl->pq_key)
q->near_ev_cache[cl->level] = cl->pq_key;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
if (cl->pq_key >= c->pq_key)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&cl->pq_node, parent, p);
rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}
/**
* htb_next_rb_node - finds next node in binary tree
* @n: the current node in binary tree
*
* When we are past the last key we return NULL.
* Average complexity is 2 steps per call.
*/
static inline void htb_next_rb_node(struct rb_node **n)
{
*n = rb_next(*n);
}
/**
* htb_add_class_to_row - add class to its row
* @q: the priority event queue
* @cl: the class to add
* @mask: the given priorities in class in bitmap
*
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static inline void htb_add_class_to_row(struct htb_sched *q,
struct htb_class *cl, int mask)
{
q->row_mask[cl->level] |= mask;
while (mask) {
int prio = ffz(~mask);
mask &= ~(1 << prio);
htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
}
}
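/* Note on the idiom above: ffz(~mask) yields the index of the lowest set
 * bit of mask (find-first-zero of its complement). E.g. for mask = 0x6 the
 * loop visits prio 1, then prio 2, clearing each bit as it goes.
 */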
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
if (RB_EMPTY_NODE(rb)) {
WARN_ON(1);
} else {
rb_erase(rb, root);
RB_CLEAR_NODE(rb);
}
}
/**
* htb_remove_class_from_row - removes class from its row
* @q: the priority event queue
* @cl: the class to remove
* @mask: the given priorities in class in bitmap
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static inline void htb_remove_class_from_row(struct htb_sched *q,
struct htb_class *cl, int mask)
{
int m = 0;
struct htb_level *hlevel = &q->hlevel[cl->level];
while (mask) {
int prio = ffz(~mask);
struct htb_prio *hprio = &hlevel->hprio[prio];
mask &= ~(1 << prio);
if (hprio->ptr == cl->node + prio)
htb_next_rb_node(&hprio->ptr);
htb_safe_rb_erase(cl->node + prio, &hprio->row);
if (!hprio->row.rb_node)
m |= 1 << prio;
}
q->row_mask[cl->level] &= ~m;
}
/**
* htb_activate_prios - creates active class's feed chain
* @q: the priority event queue
* @cl: the class to activate
*
* The class is connected to ancestors and/or appropriate rows
* for the priorities it is participating in. cl->cmode must be the new
* (activated) mode. It does nothing if cl->prio_activity == 0.
*/
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
while (m) {
unsigned int prio = ffz(~m);
if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
break;
m &= ~(1 << prio);
if (p->inner.clprio[prio].feed.rb_node)
/* parent already has its feed in use, so reset
* the bit in mask as the parent is already ok
*/
mask &= ~(1 << prio);
htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
}
p->prio_activity |= mask;
cl = p;
p = cl->parent;
}
if (cl->cmode == HTB_CAN_SEND && mask)
htb_add_class_to_row(q, cl, mask);
}
/**
* htb_deactivate_prios - remove class from feed chain
* @q: the priority event queue
* @cl: the class to deactivate
*
* cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
* chains and rows.
*/
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
mask = 0;
while (m) {
int prio = ffz(~m);
m &= ~(1 << prio);
if (p->inner.clprio[prio].ptr == cl->node + prio) {
/* we are removing a child which is pointed to from
* the parent feed - forget the pointer but remember
* the classid
*/
p->inner.clprio[prio].last_ptr_id = cl->common.classid;
p->inner.clprio[prio].ptr = NULL;
}
htb_safe_rb_erase(cl->node + prio,
&p->inner.clprio[prio].feed);
if (!p->inner.clprio[prio].feed.rb_node)
mask |= 1 << prio;
}
p->prio_activity &= ~mask;
cl = p;
p = cl->parent;
}
if (cl->cmode == HTB_CAN_SEND && mask)
htb_remove_class_from_row(q, cl, mask);
}
static inline s64 htb_lowater(const struct htb_class *cl)
{
if (htb_hysteresis)
return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
else
return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
if (htb_hysteresis)
return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
else
return 0;
}
/**
* htb_class_mode - computes and returns current class mode
* @cl: the target class
* @diff: diff time in microseconds
*
* It computes cl's mode at time cl->t_c+diff and returns it. If the mode
* is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
* from now to the time when cl will change its state.
* Also it is worth noting that the class mode doesn't change simply
* at cl->{c,}tokens == 0 but rather there can be a hysteresis in the
* 0 .. -cl->{c,}buffer range. It is meant to limit the number of
* mode transitions per time unit. The speed gain is about 1/6.
*/
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
s64 toks;
if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
*diff = -toks;
return HTB_CANT_SEND;
}
if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
return HTB_CAN_SEND;
*diff = -toks;
return HTB_MAY_BORROW;
}
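/* Worked example (illustrative numbers, hysteresis off so both watermarks
 * are 0): with cl->ctokens + *diff == -500 the class is HTB_CANT_SEND and
 * *diff becomes 500, the time until ctokens reaches 0; with
 * cl->ctokens + *diff == 200 but cl->tokens + *diff == -300 the class is
 * HTB_MAY_BORROW and *diff becomes 300. Callers then feed *diff to
 * htb_add_to_wait_tree() as the delay until the next possible mode change.
 */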
/**
* htb_change_class_mode - changes class's mode
* @q: the priority event queue
* @cl: the target class
* @diff: diff time in microseconds
*
* This should be the only way to change a class's mode under normal
* circumstances. The routine will update the feed lists' linkage, change
* the mode and add the class to the wait event queue if appropriate. The
* new mode should be different from the old one and cl->pq_key has to be
* valid if changing to a mode other than HTB_CAN_SEND (see
* htb_add_to_wait_tree).
*/
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
enum htb_cmode new_mode = htb_class_mode(cl, diff);
if (new_mode == cl->cmode)
return;
if (new_mode == HTB_CANT_SEND) {
cl->overlimits++;
q->overlimits++;
}
if (cl->prio_activity) { /* not necessary: speed optimization */
if (cl->cmode != HTB_CANT_SEND)
htb_deactivate_prios(q, cl);
cl->cmode = new_mode;
if (new_mode != HTB_CANT_SEND)
htb_activate_prios(q, cl);
} else
cl->cmode = new_mode;
}
/**
* htb_activate - inserts leaf cl into appropriate active feeds
* @q: the priority event queue
* @cl: the target class
*
* Routine learns the (new) priority of the leaf and activates the feed
* chain for the prio. It can safely be called on an already active leaf.
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
htb_activate_prios(q, cl);
}
}
/**
* htb_deactivate - remove leaf cl from active feeds
* @q: the priority event queue
* @cl: the target class
*
* Make sure that the leaf is active. In other words, it can't be called
* with a non-active leaf.
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
WARN_ON(!cl->prio_activity);
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
int ret;
unsigned int len = qdisc_pkt_len(skb);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
__qdisc_enqueue_tail(skb, &q->direct_queue);
q->direct_pkts++;
} else {
return qdisc_drop(skb, sch, to_free);
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
__qdisc_drop(skb, to_free);
return ret;
#endif
} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
to_free)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
qdisc_qstats_drop(sch);
cl->drops++;
}
return ret;
} else {
htb_activate(q, cl);
}
sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
s64 toks = diff + cl->tokens;
if (toks > cl->buffer)
toks = cl->buffer;
toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
if (toks <= -cl->mbuffer)
toks = 1 - cl->mbuffer;
cl->tokens = toks;
}
static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
s64 toks = diff + cl->ctokens;
if (toks > cl->cbuffer)
toks = cl->cbuffer;
toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
if (toks <= -cl->mbuffer)
toks = 1 - cl->mbuffer;
cl->ctokens = toks;
}
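/* Worked example (illustrative numbers): at rate 125000 bytes/s (1 Mbit/s),
 * psched_l2t_ns() prices a 1250-byte packet at 10 ms, i.e. 10,000,000 ns of
 * tokens. A class whose buffer holds 20 ms worth of tokens and that has
 * been idle long enough to be full can thus burst two such packets before
 * its tokens go negative.
 */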
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
* @q: the priority event queue
* @cl: the class to start iterating from
* @level: the minimum level to account
* @skb: the socket buffer
*
* Routine assumes that a packet "bytes" long was dequeued from leaf cl
* borrowing from "level". It accounts bytes to the ceil leaky bucket for
* the leaf and all ancestors and to the rate bucket for ancestors at levels
* "level" and higher. It also handles a possible change of mode resulting
* from the update. Note that the mode can also increase here (MAY_BORROW to
* CAN_SEND) because we can use a more precise clock than the event queue
* here. In such a case we remove the class from the event queue first.
*/
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
int level, struct sk_buff *skb)
{
int bytes = qdisc_pkt_len(skb);
enum htb_cmode old_mode;
s64 diff;
while (cl) {
diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
if (cl->level >= level) {
if (cl->level == level)
cl->xstats.lends++;
htb_accnt_tokens(cl, bytes, diff);
} else {
cl->xstats.borrows++;
cl->tokens += diff; /* we moved t_c; update tokens */
}
htb_accnt_ctokens(cl, bytes, diff);
cl->t_c = q->now;
old_mode = cl->cmode;
diff = 0;
htb_change_class_mode(q, cl, &diff);
if (old_mode != cl->cmode) {
if (old_mode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
/* update basic stats except for leaves which are already updated */
if (cl->level)
bstats_update(&cl->bstats, skb);
cl = cl->parent;
}
}
/**
* htb_do_events - make mode changes to classes at the level
* @q: the priority event queue
* @level: which wait_pq in 'q->hlevel'
* @start: start jiffies
*
* Scans the event queue for pending events and applies them. Returns the
* time of the next pending event (0 for no event in pq, q->now for too
* many events). Note: applied are events that have cl->pq_key <= q->now.
*/
static s64 htb_do_events(struct htb_sched *q, const int level,
unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
* 1 to simplify things when jiffy is going to be incremented
* too soon
*/
unsigned long stop_at = start + 2;
struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
s64 diff;
struct rb_node *p = rb_first(wait_pq);
if (!p)
return 0;
cl = rb_entry(p, struct htb_class, pq_node);
if (cl->pq_key > q->now)
return cl->pq_key;
htb_safe_rb_erase(p, wait_pq);
diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
htb_change_class_mode(q, cl, &diff);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
pr_warn("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
return q->now;
}
/* Returns class->node+prio from the id-tree where the class's id is >= id.
* NULL if no such one exists.
*/
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id)
{
struct rb_node *r = NULL;
while (n) {
struct htb_class *cl =
rb_entry(n, struct htb_class, node[prio]);
if (id > cl->common.classid) {
n = n->rb_right;
} else if (id < cl->common.classid) {
r = n;
n = n->rb_left;
} else {
return n;
}
}
return r;
}
/**
* htb_lookup_leaf - returns next leaf class in DRR order
* @hprio: the current one
* @prio: which prio in class
*
* Find the leaf where the current feed pointer points to.
*/
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
BUG_ON(!hprio->row.rb_node);
sp->root = hprio->row.rb_node;
sp->pptr = &hprio->ptr;
sp->pid = &hprio->last_ptr_id;
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
* the original or next ptr
*/
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now, so remove this hint as it
* can become out of date quickly
*/
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
if (!*sp->pptr) {
WARN_ON(1);
return NULL;
}
htb_next_rb_node(sp->pptr);
}
} else {
struct htb_class *cl;
struct htb_prio *clp;
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
if (!cl->level)
return cl;
clp = &cl->inner.clprio[prio];
(++sp)->root = clp->feed.rb_node;
sp->pptr = &clp->ptr;
sp->pid = &clp->last_ptr_id;
}
}
WARN_ON(1);
return NULL;
}
/* dequeues packet at given priority and level; call only if
* you are sure that there is an active class at prio/level
*/
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
const int level)
{
struct sk_buff *skb = NULL;
struct htb_class *cl, *start;
struct htb_level *hlevel = &q->hlevel[level];
struct htb_prio *hprio = &hlevel->hprio[prio];
/* look initial class up in the row */
start = cl = htb_lookup_leaf(hprio, prio);
do {
next:
if (unlikely(!cl))
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
* qdisc drops packets in enqueue routine or if someone used
* graft operation on the leaf since last dequeue;
* simply deactivate and skip such class
*/
if (unlikely(cl->leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
/* row/level might become empty */
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
next = htb_lookup_leaf(hprio, prio);
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
goto next;
}
skb = cl->leaf.q->dequeue(cl->leaf.q);
if (likely(skb != NULL))
break;
qdisc_warn_nonwc("htb", cl->leaf.q);
htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
&q->hlevel[0].hprio[prio].ptr);
cl = htb_lookup_leaf(hprio, prio);
} while (cl != start);
if (likely(skb != NULL)) {
bstats_update(&cl->bstats, skb);
cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
if (cl->leaf.deficit[level] < 0) {
cl->leaf.deficit[level] += cl->quantum;
htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
&q->hlevel[0].hprio[prio].ptr);
}
/* this used to be after charge_class but this constellation
* gives us slightly better performance
*/
if (!cl->leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
}
return skb;
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
s64 next_event;
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __qdisc_dequeue_head(&q->direct_queue);
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
if (!sch->q.qlen)
goto fin;
q->now = ktime_get_ns();
start_at = jiffies;
next_event = q->now + 5LLU * NSEC_PER_SEC;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
s64 event = q->near_ev_cache[level];
if (q->now >= event) {
event = htb_do_events(q, level, start_at);
if (!event)
event = q->now + NSEC_PER_SEC;
q->near_ev_cache[level] = event;
}
if (next_event > event)
next_event = event;
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL))
goto ok;
}
}
if (likely(next_event > q->now))
qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
else
schedule_work(&q->work);
fin:
return skb;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->level)
memset(&cl->inner, 0, sizeof(cl->inner));
else {
if (cl->leaf.q && !q->offload)
qdisc_reset(cl->leaf.q);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
}
}
qdisc_watchdog_cancel(&q->watchdog);
__qdisc_reset_queue(&q->direct_queue);
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
[TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
[TCA_HTB_RATE64] = { .type = NLA_U64 },
[TCA_HTB_CEIL64] = { .type = NLA_U64 },
[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};
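/* Userspace sketch (assuming stock iproute2): the attributes above are
 * filled in by tc; for example TCA_HTB_RATE64/TCA_HTB_CEIL64 are sent when
 * a rate or ceil does not fit the legacy 32-bit fields:
 *
 *   tc class add dev eth0 parent 1: classid 1:10 htb rate 40gbit ceil 40gbit
 */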
static void htb_work_func(struct work_struct *work)
{
struct htb_sched *q = container_of(work, struct htb_sched, work);
struct Qdisc *sch = q->watchdog.qdisc;
rcu_read_lock();
__netif_schedule(qdisc_root(sch));
rcu_read_unlock();
}
static void htb_set_lockdep_class_child(struct Qdisc *q)
{
static struct lock_class_key child_key;
lockdep_set_class(qdisc_lock(q), &child_key);
}
static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct tc_htb_qopt_offload offload_opt;
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_glob *gopt;
unsigned int ntx;
bool offload;
int err;
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
if (!opt)
return -EINVAL;
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
NULL);
if (err < 0)
return err;
if (!tb[TCA_HTB_INIT])
return -EINVAL;
gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16)
return -EINVAL;
offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
if (offload) {
if (sch->parent != TC_H_ROOT) {
NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
return -EOPNOTSUPP;
}
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
return -EOPNOTSUPP;
}
q->num_direct_qdiscs = dev->real_num_tx_queues;
q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
sizeof(*q->direct_qdiscs),
GFP_KERNEL);
if (!q->direct_qdiscs)
return -ENOMEM;
}
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
else
q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
if (!offload)
return 0;
for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
struct Qdisc *qdisc;
qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 0), extack);
if (!qdisc) {
return -ENOMEM;
}
htb_set_lockdep_class_child(qdisc);
q->direct_qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_CREATE,
.parent_classid = TC_H_MAJ(sch->handle) >> 16,
.classid = TC_H_MIN(q->defcls),
.extack = extack,
};
err = htb_offload(dev, &offload_opt);
if (err)
return err;
/* Defer this assignment, so that htb_destroy skips offload-related
* parts (especially calling ndo_setup_tc) on errors.
*/
q->offload = true;
return 0;
}
static void htb_attach_offload(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct htb_sched *q = qdisc_priv(sch);
unsigned int ntx;
for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
qdisc_put(old);
qdisc_hash_add(qdisc, false);
}
for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
qdisc_put(old);
}
kfree(q->direct_qdiscs);
q->direct_qdiscs = NULL;
}
static void htb_attach_software(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx;
/* Resemble qdisc_graft behavior. */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
qdisc_refcount_inc(sch);
qdisc_put(old);
}
}
static void htb_attach(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
if (q->offload)
htb_attach_offload(sch);
else
htb_attach_software(sch);
}
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *nest;
struct tc_htb_glob gopt;
if (q->offload)
sch->flags |= TCQ_F_OFFLOADED;
else
sch->flags &= ~TCQ_F_OFFLOADED;
sch->qstats.overlimits = q->overlimits;
/* It's safe to not acquire the qdisc lock. As we hold RTNL,
* no change can happen to the qdisc parameters.
*/
gopt.direct_pkts = q->direct_pkts;
gopt.version = HTB_VER;
gopt.rate2quantum = q->rate2quantum;
gopt.defcls = q->defcls;
gopt.debug = 0;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
goto nla_put_failure;
if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct htb_class *cl = (struct htb_class *)arg;
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *nest;
struct tc_htb_opt opt;
/* It's safe to not acquire the qdisc lock. As we hold RTNL,
* no change can happen to the class parameters.
*/
tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
if (!cl->level && cl->leaf.q)
tcm->tcm_info = cl->leaf.q->handle;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
memset(&opt, 0, sizeof(opt));
psched_ratecfg_getrate(&opt.rate, &cl->rate);
opt.buffer = PSCHED_NS2TICKS(cl->buffer);
psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
opt.quantum = cl->quantum;
opt.prio = cl->prio;
opt.level = cl->level;
if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
goto nla_put_failure;
if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure;
if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *cl)
{
u64 bytes = 0, packets = 0;
struct htb_class *c;
unsigned int i;
gnet_stats_basic_sync_init(&cl->bstats);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
struct htb_class *p = c;
while (p && p->level < cl->level)
p = p->parent;
if (p != cl)
continue;
bytes += u64_stats_read(&c->bstats_bias.bytes);
packets += u64_stats_read(&c->bstats_bias.packets);
if (c->level == 0) {
bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
packets += u64_stats_read(&c->leaf.q->bstats.packets);
}
}
}
_bstats_update(&cl->bstats, bytes, packets);
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
struct htb_class *cl = (struct htb_class *)arg;
struct htb_sched *q = qdisc_priv(sch);
struct gnet_stats_queue qs = {
.drops = cl->drops,
.overlimits = cl->overlimits,
};
__u32 qlen = 0;
if (!cl->level && cl->leaf.q)
qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
INT_MIN, INT_MAX);
cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
INT_MIN, INT_MAX);
if (q->offload) {
if (!cl->level) {
if (cl->leaf.q)
cl->bstats = cl->leaf.q->bstats;
else
gnet_stats_basic_sync_init(&cl->bstats);
_bstats_update(&cl->bstats,
u64_stats_read(&cl->bstats_bias.bytes),
u64_stats_read(&cl->bstats_bias.packets));
} else {
htb_offload_aggregate_stats(q, cl);
}
}
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static struct netdev_queue *
htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
{
struct net_device *dev = qdisc_dev(sch);
struct tc_htb_qopt_offload offload_opt;
struct htb_sched *q = qdisc_priv(sch);
int err;
if (!q->offload)
return sch->dev_queue;
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_LEAF_QUERY_QUEUE,
.classid = TC_H_MIN(tcm->tcm_parent),
};
err = htb_offload(dev, &offload_opt);
if (err || offload_opt.qid >= dev->num_tx_queues)
return NULL;
return netdev_get_tx_queue(dev, offload_opt.qid);
}
static struct Qdisc *
htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
{
struct net_device *dev = dev_queue->dev;
struct Qdisc *old_q;
if (dev->flags & IFF_UP)
dev_deactivate(dev);
old_q = dev_graft_qdisc(dev_queue, new_q);
if (new_q)
new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
return old_q;
}
static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
{
struct netdev_queue *queue;
queue = cl->leaf.offload_queue;
if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
WARN_ON(cl->leaf.q->dev_queue != queue);
return queue;
}
static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
struct htb_class *cl_new, bool destroying)
{
struct netdev_queue *queue_old, *queue_new;
struct net_device *dev = qdisc_dev(sch);
queue_old = htb_offload_get_queue(cl_old);
queue_new = htb_offload_get_queue(cl_new);
if (!destroying) {
struct Qdisc *qdisc;
if (dev->flags & IFF_UP)
dev_deactivate(dev);
qdisc = dev_graft_qdisc(queue_old, NULL);
WARN_ON(qdisc != cl_old->leaf.q);
}
if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
cl_old->leaf.q->dev_queue = queue_new;
cl_old->leaf.offload_queue = queue_new;
if (!destroying) {
struct Qdisc *qdisc;
qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
if (dev->flags & IFF_UP)
dev_activate(dev);
WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
}
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netdev_queue *dev_queue = sch->dev_queue;
struct htb_class *cl = (struct htb_class *)arg;
struct htb_sched *q = qdisc_priv(sch);
struct Qdisc *old_q;
if (cl->level)
return -EINVAL;
if (q->offload)
dev_queue = htb_offload_get_queue(cl);
if (!new) {
new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
cl->common.classid, extack);
if (!new)
return -ENOBUFS;
}
if (q->offload) {
htb_set_lockdep_class_child(new);
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
qdisc_refcount_inc(new);
old_q = htb_graft_helper(dev_queue, new);
}
*old = qdisc_replace(sch, new, &cl->leaf.q);
if (q->offload) {
WARN_ON(old_q != *old);
qdisc_put(old_q);
}
return 0;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
return !cl->level ? cl->leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
htb_deactivate(qdisc_priv(sch), cl);
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
if (!cl->parent)
/* the root class */
return 0;
if (cl->parent->children > 1)
/* not the last child */
return 0;
return 1;
}
static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
struct Qdisc *new_q)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *parent = cl->parent;
WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node,
&q->hlevel[parent->level].wait_pq);
parent->level = 0;
memset(&parent->inner, 0, sizeof(parent->inner));
parent->leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
parent->t_c = ktime_get_ns();
parent->cmode = HTB_CAN_SEND;
if (q->offload)
parent->leaf.offload_queue = cl->leaf.offload_queue;
}
static void htb_parent_to_leaf_offload(struct Qdisc *sch,
struct netdev_queue *dev_queue,
struct Qdisc *new_q)
{
struct Qdisc *old_q;
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
if (new_q)
qdisc_refcount_inc(new_q);
old_q = htb_graft_helper(dev_queue, new_q);
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}
static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
bool last_child, bool destroying,
struct netlink_ext_ack *extack)
{
struct tc_htb_qopt_offload offload_opt;
struct netdev_queue *dev_queue;
struct Qdisc *q = cl->leaf.q;
struct Qdisc *old;
int err;
if (cl->level)
return -EINVAL;
WARN_ON(!q);
dev_queue = htb_offload_get_queue(cl);
/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
* qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
* does not need to graft or qdisc_put the qdisc being destroyed.
*/
if (!destroying) {
old = htb_graft_helper(dev_queue, NULL);
/* Last qdisc grafted should be the same as cl->leaf.q when
* calling htb_delete.
*/
WARN_ON(old != q);
}
if (cl->parent) {
_bstats_update(&cl->parent->bstats_bias,
u64_stats_read(&q->bstats.bytes),
u64_stats_read(&q->bstats.packets));
}
offload_opt = (struct tc_htb_qopt_offload) {
.command = !last_child ? TC_HTB_LEAF_DEL :
destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
TC_HTB_LEAF_DEL_LAST,
.classid = cl->common.classid,
.extack = extack,
};
err = htb_offload(qdisc_dev(sch), &offload_opt);
if (!destroying) {
if (!err)
qdisc_put(old);
else
htb_graft_helper(dev_queue, old);
}
if (last_child)
return err;
if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
u32 classid = TC_H_MAJ(sch->handle) |
TC_H_MIN(offload_opt.classid);
struct htb_class *moved_cl = htb_find(classid, sch);
htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
}
return err;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
WARN_ON(!cl->leaf.q);
qdisc_put(cl->leaf.q);
}
gen_kill_estimator(&cl->rate_est);
tcf_block_put(cl->block);
kfree(cl);
}
static void htb_destroy(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct tc_htb_qopt_offload offload_opt;
struct htb_sched *q = qdisc_priv(sch);
struct hlist_node *next;
bool nonempty, changed;
struct htb_class *cl;
unsigned int i;
cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after the htb_destroy_class call below
* and surprisingly it worked in 2.4. But it must precede it
* because filters need their target class alive to be able to call
* unbind_filter on it (without an Oops).
*/
tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
tcf_block_put(cl->block);
cl->block = NULL;
}
}
do {
nonempty = false;
changed = false;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode) {
bool last_child;
if (!q->offload) {
htb_destroy_class(sch, cl);
continue;
}
nonempty = true;
if (cl->level)
continue;
changed = true;
last_child = htb_parent_last_child(cl);
htb_destroy_class_offload(sch, cl, last_child,
true, NULL);
qdisc_class_hash_remove(&q->clhash,
&cl->common);
if (cl->parent)
cl->parent->children--;
if (last_child)
htb_parent_to_leaf(sch, cl, NULL);
htb_destroy_class(sch, cl);
}
}
} while (changed);
WARN_ON(nonempty);
qdisc_class_hash_destroy(&q->clhash);
__qdisc_reset_queue(&q->direct_queue);
if (q->offload) {
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_DESTROY,
};
htb_offload(dev, &offload_opt);
}
if (!q->direct_qdiscs)
return;
for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
qdisc_put(q->direct_qdiscs[i]);
kfree(q->direct_qdiscs);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg,
struct netlink_ext_ack *extack)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
struct Qdisc *new_q = NULL;
int last_child = 0;
int err;
/* TODO: why don't we allow deleting a subtree? References? Does the
* tc subsystem guarantee us that in htb_destroy it holds no class
* refs, so that we can remove children safely there?
*/
if (cl->children || qdisc_class_in_use(&cl->common)) {
NL_SET_ERR_MSG(extack, "HTB class in use");
return -EBUSY;
}
if (!cl->level && htb_parent_last_child(cl))
last_child = 1;
if (q->offload) {
err = htb_destroy_class_offload(sch, cl, last_child, false,
extack);
if (err)
return err;
}
if (last_child) {
struct netdev_queue *dev_queue = sch->dev_queue;
if (q->offload)
dev_queue = htb_offload_get_queue(cl);
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
cl->parent->common.classid,
NULL);
if (q->offload) {
if (new_q)
htb_set_lockdep_class_child(new_q);
htb_parent_to_leaf_offload(sch, dev_queue, new_q);
}
}
sch_tree_lock(sch);
if (!cl->level)
qdisc_purge_queue(cl->leaf.q);
/* delete from hash and active; remainder in destroy_class */
qdisc_class_hash_remove(&q->clhash, &cl->common);
if (cl->parent)
cl->parent->children--;
if (cl->prio_activity)
htb_deactivate(q, cl);
if (cl->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node,
&q->hlevel[cl->level].wait_pq);
if (last_child)
htb_parent_to_leaf(sch, cl, new_q);
sch_tree_unlock(sch);
htb_destroy_class(sch, cl);
return 0;
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
u32 parentid, struct nlattr **tca,
unsigned long *arg, struct netlink_ext_ack *extack)
{
int err = -EINVAL;
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct tc_htb_qopt_offload offload_opt;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_HTB_MAX + 1];
struct Qdisc *parent_qdisc = NULL;
struct netdev_queue *dev_queue;
struct tc_htb_opt *hopt;
u64 rate64, ceil64;
int warn = 0;
/* extract all subattrs from opt attr */
if (!opt)
goto failure;
err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
extack);
if (err < 0)
goto failure;
err = -EINVAL;
if (tb[TCA_HTB_PARMS] == NULL)
goto failure;
parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
hopt = nla_data(tb[TCA_HTB_PARMS]);
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
if (q->offload) {
/* Options not supported by the offload. */
if (hopt->rate.overhead || hopt->ceil.overhead) {
NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
goto failure;
}
if (hopt->rate.mpu || hopt->ceil.mpu) {
NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
goto failure;
}
}
/* Keeping backward compatibility with rate_table based iproute2 tc */
if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
NULL));
if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
NULL));
rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
if (!cl) { /* new class */
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *new_q, *old_q;
int prio;
struct {
struct nlattr nla;
struct gnet_estimator opt;
} est = {
.nla = {
.nla_len = nla_attr_size(sizeof(est.opt)),
.nla_type = TCA_RATE,
},
.opt = {
/* 4s interval, 16s averaging constant */
.interval = 2,
.ewma_log = 2,
},
};
/* check for valid classid */
if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
htb_find(classid, sch))
goto failure;
/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
goto failure;
}
err = -ENOBUFS;
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
goto failure;
gnet_stats_basic_sync_init(&cl->bstats);
gnet_stats_basic_sync_init(&cl->bstats_bias);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
goto failure;
}
if (htb_rate_est || tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
true,
tca[TCA_RATE] ? : &est.nla);
if (err)
goto err_block_put;
}
cl->children = 0;
RB_CLEAR_NODE(&cl->pq_node);
for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
RB_CLEAR_NODE(&cl->node[prio]);
cl->common.classid = classid;
/* Make sure nothing interrupts us in between two
* ndo_setup_tc calls.
*/
ASSERT_RTNL();
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
* which can't be used inside sch_tree_lock
* -- thanks to Karlis Peisenieks
*/
if (!q->offload) {
dev_queue = sch->dev_queue;
} else if (!(parent && !parent->level)) {
/* Assign a dev_queue to this classid. */
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_LEAF_ALLOC_QUEUE,
.classid = cl->common.classid,
.parent_classid = parent ?
TC_H_MIN(parent->common.classid) :
TC_HTB_CLASSID_ROOT,
.rate = max_t(u64, hopt->rate.rate, rate64),
.ceil = max_t(u64, hopt->ceil.rate, ceil64),
.prio = hopt->prio,
.quantum = hopt->quantum,
.extack = extack,
};
err = htb_offload(dev, &offload_opt);
if (err) {
NL_SET_ERR_MSG_WEAK(extack,
"Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
goto err_kill_estimator;
}
dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
} else { /* First child. */
dev_queue = htb_offload_get_queue(parent);
old_q = htb_graft_helper(dev_queue, NULL);
WARN_ON(old_q != parent->leaf.q);
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_LEAF_TO_INNER,
.classid = cl->common.classid,
.parent_classid =
TC_H_MIN(parent->common.classid),
.rate = max_t(u64, hopt->rate.rate, rate64),
.ceil = max_t(u64, hopt->ceil.rate, ceil64),
.prio = hopt->prio,
.quantum = hopt->quantum,
.extack = extack,
};
err = htb_offload(dev, &offload_opt);
if (err) {
NL_SET_ERR_MSG_WEAK(extack,
"Failed to offload TC_HTB_LEAF_TO_INNER");
htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator;
}
_bstats_update(&parent->bstats_bias,
u64_stats_read(&old_q->bstats.bytes),
u64_stats_read(&old_q->bstats.packets));
qdisc_put(old_q);
}
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
classid, NULL);
if (q->offload) {
if (new_q) {
htb_set_lockdep_class_child(new_q);
/* One ref for cl->leaf.q, the other for
* dev_queue->qdisc.
*/
qdisc_refcount_inc(new_q);
}
old_q = htb_graft_helper(dev_queue, new_q);
/* No qdisc_put needed. */
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}
sch_tree_lock(sch);
if (parent && !parent->level) {
/* turn parent into inner node */
qdisc_purge_queue(parent->leaf.q);
parent_qdisc = parent->leaf.q;
if (parent->prio_activity)
htb_deactivate(q, parent);
/* remove from evt list because of level change */
if (parent->cmode != HTB_CAN_SEND) {
htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
parent->cmode = HTB_CAN_SEND;
}
parent->level = (parent->parent ? parent->parent->level
: TC_HTB_MAXDEPTH) - 1;
memset(&parent->inner, 0, sizeof(parent->inner));
}
/* leaf (we) needs elementary qdisc */
cl->leaf.q = new_q ? new_q : &noop_qdisc;
if (q->offload)
cl->leaf.offload_queue = dev_queue;
cl->parent = parent;
/* set class to be in HTB_CAN_SEND state */
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
cl->t_c = ktime_get_ns();
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
qdisc_class_hash_insert(&q->clhash, &cl->common);
if (parent)
parent->children++;
if (cl->leaf.q != &noop_qdisc)
qdisc_hash_add(cl->leaf.q, true);
} else {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
true,
tca[TCA_RATE]);
if (err)
return err;
}
if (q->offload) {
struct net_device *dev = qdisc_dev(sch);
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_NODE_MODIFY,
.classid = cl->common.classid,
.rate = max_t(u64, hopt->rate.rate, rate64),
.ceil = max_t(u64, hopt->ceil.rate, ceil64),
.prio = hopt->prio,
.quantum = hopt->quantum,
.extack = extack,
};
err = htb_offload(dev, &offload_opt);
if (err)
/* Estimator was replaced, and rollback may fail
* as well, so we don't try to recover it, and
* the estimator won't work properly with the
* offload anyway, because bstats are updated
* only when the stats are queried.
*/
return err;
}
sch_tree_lock(sch);
}
psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
/* there used to be a nasty bug here: we have to check that the node
* is really a leaf before changing cl->leaf!
*/
if (!cl->level) {
u64 quantum = cl->rate.rate_bytes_ps;
do_div(quantum, q->rate2quantum);
cl->quantum = min_t(u64, quantum, INT_MAX);
if (!hopt->quantum && cl->quantum < 1000) {
warn = -1;
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
warn = 1;
cl->quantum = 200000;
}
if (hopt->quantum)
cl->quantum = hopt->quantum;
if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
cl->prio = TC_HTB_NUMPRIO - 1;
}
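/* Worked example of the defaults above (illustrative numbers): with the
 * common r2q of 10, a 100 Mbit/s class (12,500,000 bytes/s) would get
 * quantum = 1,250,000, which is clamped to 200000 with a warning; an
 * explicit quantum in hopt always wins.
 */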
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
sch_tree_unlock(sch);
qdisc_put(parent_qdisc);
if (warn)
NL_SET_ERR_MSG_FMT_MOD(extack,
"quantum of class %X is %s. Consider r2q change.",
cl->common.classid, (warn == -1 ? "small" : "big"));
qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
err_kill_estimator:
gen_kill_estimator(&cl->rate_est);
err_block_put:
tcf_block_put(cl->block);
kfree(cl);
failure:
return err;
}
static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
struct netlink_ext_ack *extack)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
return cl ? cl->block : q->block;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
* The line above used to be there to prevent attaching filters to
* leaves. But at least the tc_index filter uses this just to get the class
* for other reasons, so we have to allow it.
* ----
* 19.6.2002 As Werner explained it is ok - bind filter is just
* another way to "lock" the class - unlike "get" this lock can
* be broken by class during destroy IIUC.
*/
if (cl)
qdisc_class_get(&cl->common);
return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
qdisc_class_put(&cl->common);
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
}
}
}
static const struct Qdisc_class_ops htb_class_ops = {
.select_queue = htb_select_queue,
.graft = htb_graft,
.leaf = htb_leaf,
.qlen_notify = htb_qlen_notify,
.find = htb_search,
.change = htb_change_class,
.delete = htb_delete,
.walk = htb_walk,
.tcf_block = htb_tcf_block,
.bind_tcf = htb_bind_filter,
.unbind_tcf = htb_unbind_filter,
.dump = htb_dump_class,
.dump_stats = htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.cl_ops = &htb_class_ops,
.id = "htb",
.priv_size = sizeof(struct htb_sched),
.enqueue = htb_enqueue,
.dequeue = htb_dequeue,
.peek = qdisc_peek_dequeued,
.init = htb_init,
.attach = htb_attach,
.reset = htb_reset,
.destroy = htb_destroy,
.dump = htb_dump,
.owner = THIS_MODULE,
};
static int __init htb_module_init(void)
{
return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
unregister_qdisc(&htb_qdisc_ops);
}
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_hhf.c Heavy-Hitter Filter (HHF)
*
* Copyright (C) 2013 Terry Lam <[email protected]>
* Copyright (C) 2013 Nandita Dukkipati <[email protected]>
*/
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
/* Heavy-Hitter Filter (HHF)
*
* Principles :
* Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
* buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
* as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
* The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
* in which the heavy-hitter bucket is served with less weight.
* In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
* are isolated from heavy-hitters (e.g., persistent bulk traffic) and also have
* a higher share of bandwidth.
*
* To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
* following paper:
* [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
* Accounting", in ACM SIGCOMM, 2002.
*
* Conceptually, a multi-stage filter comprises k independent hash functions
* and k counter arrays. Packets are indexed into k counter arrays by k hash
* functions, respectively. The counters are then increased by the packet sizes.
* Therefore,
* - For a heavy-hitter flow: *all* of its k array counters must be large.
* - For a non-heavy-hitter flow: some of its k array counters can be large
* due to hash collision with other small flows; however, with high
* probability, not *all* k counters are large.
*
* By the design of the multi-stage filter algorithm, the false negative rate
* (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
* susceptible to false positives (non-heavy-hitters mistakenly classified as
* heavy-hitters).
* Therefore, we also implement the following optimizations to reduce false
* positives by avoiding unnecessary increment of the counter values:
* - Optimization O1: once a heavy-hitter is identified, its bytes are not
* accounted in the array counters. This technique is called "shielding"
* in Section 3.3.1 of [EV02].
* - Optimization O2: conservative update of counters
* (Section 3.3.2 of [EV02]),
* New counter value = max {old counter value,
* smallest counter value + packet bytes}
*
* Finally, we refresh the counters periodically since otherwise the counter
* values will keep accumulating.
*
* Once a flow is classified as heavy-hitter, we also save its per-flow state
* in an exact-matching flow table so that its subsequent packets can be
* dispatched to the heavy-hitter bucket accordingly.
*
*
* At a high level, this qdisc works as follows:
* Given a packet p:
* - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
* heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
* bucket.
* - Otherwise, forward p to the multi-stage filter, denoted filter F
* + If F decides that p belongs to a non-heavy-hitter flow, then send p
* to the non-heavy-hitter bucket.
* + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
* then set up a new flow entry for the flow-id of p in the table T and
* send p to the heavy-hitter bucket.
*
* In this implementation:
* - T is a fixed-size hash-table with 1024 entries. Hash collision is
* resolved by linked-list chaining.
* - F has four counter arrays, each array containing 1024 32-bit counters.
* That means 4 * 1024 * 32 bits = 16KB of memory.
* - Since each array in F contains 1024 counters, 10 bits are sufficient to
* index into each array.
* Hence, instead of having four hash functions, we chop the 32-bit
* skb-hash into three 10-bit chunks, and the fourth index is computed
* as the XOR sum of those three chunks, folded with the leftover top
* bits of the hash (see hhf_classify()).
* - We need to clear the counter arrays periodically; however, directly
* memsetting 16KB of memory can lead to cache eviction and unwanted delay.
* So by representing each counter by a valid bit, we only need to reset
* 4K one-bit flags (i.e. 512 bytes) instead of 16KB of memory.
* - The Deficit Round Robin engine is taken from fq_codel implementation
* (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
* fq_codel_flow in fq_codel implementation.
*
*/
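/* Worked example of the hash-chopping scheme above (illustrative numbers
* only, not part of the algorithm): for a 32-bit skb-hash of 0xC2A517F3,
* the three low 10-bit chunks are
* pos[0] = 0x3F3, pos[1] = 0x145, pos[2] = 0x02A,
* the leftover top two bits are 0x3, and the fourth index is
* pos[3] = pos[0] ^ pos[1] ^ pos[2] ^ 0x3 = 0x29F
* (hhf_classify() folds those leftover bits into the XOR sum).
*/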
/* Non-configurable parameters */
#define HH_FLOWS_CNT 1024 /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT 4 /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN 1024 /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10 /* masking 10 bits */
#define HHF_BIT_MASK 0x3FF /* bitmask of 10 bits */
#define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
};
#define hhf_time_before(a, b) \
(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))
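/* Example of why the signed subtraction above is wraparound-safe: with
* a = 0xFFFFFFF8 and b = 0x00000008 (i.e. the u32 clock just wrapped),
* a - b = 0xFFFFFFF0, which as an s32 is -16 < 0, so "a" is correctly
* treated as before "b" even though a > b as unsigned values.
*/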
/* Heavy-hitter per-flow state */
struct hh_flow_state {
u32 hash_id; /* hash of flow-id (e.g. TCP 5-tuple) */
u32 hit_timestamp; /* last time heavy-hitter was seen */
struct list_head flowchain; /* chaining under hash collision */
};
/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
struct sk_buff *head;
struct sk_buff *tail;
struct list_head bucketchain;
int deficit;
};
struct hhf_sched_data {
struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
siphash_key_t perturbation; /* hash perturbation */
u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
u32 drop_overlimit; /* number of times max qdisc packet
* limit was hit
*/
struct list_head *hh_flows; /* table T (currently active HHs) */
u32 hh_flows_limit; /* max active HH allocs */
u32 hh_flows_overlimit; /* num of disallowed HH allocs */
u32 hh_flows_total_cnt; /* total admitted HHs */
u32 hh_flows_current_cnt; /* total current HHs */
u32 *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
u32 hhf_arrays_reset_timestamp; /* last time hhf_arrays
* was reset
*/
unsigned long *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
* of hhf_arrays
*/
/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
struct list_head new_buckets; /* list of new buckets */
struct list_head old_buckets; /* list of old buckets */
/* Configurable HHF parameters */
u32 hhf_reset_timeout; /* interval to reset counter
* arrays in filter F
* (default 40ms)
*/
u32 hhf_admit_bytes; /* counter thresh to classify as
* HH (default 128KB).
* With these default values,
* 128KB / 40ms = 25 Mbps
* i.e., we expect to capture HHs
* sending > 25 Mbps.
*/
u32 hhf_evict_timeout; /* aging threshold to evict idle
* HHs out of table T. This should
* be large enough to avoid
* reordering during HH eviction.
* (default 1s)
*/
u32 hhf_non_hh_weight; /* WDRR weight for non-HHs
* (default 2,
* i.e., non-HH : HH = 2 : 1)
*/
};
static u32 hhf_time_stamp(void)
{
return jiffies;
}
/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
struct list_head *head,
struct hhf_sched_data *q)
{
struct hh_flow_state *flow, *next;
u32 now = hhf_time_stamp();
if (list_empty(head))
return NULL;
list_for_each_entry_safe(flow, next, head, flowchain) {
u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
if (hhf_time_before(prev, now)) {
/* Delete expired heavy-hitters, but preserve one entry
* to avoid kzalloc() the next time this slot is hit.
*/
if (list_is_last(&flow->flowchain, head))
return NULL;
list_del(&flow->flowchain);
kfree(flow);
q->hh_flows_current_cnt--;
} else if (flow->hash_id == hash) {
return flow;
}
}
return NULL;
}
/* Returns a flow state entry for a new heavy-hitter. Either reuses an expired
* entry or dynamically allocates a new one.
*/
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
struct hhf_sched_data *q)
{
struct hh_flow_state *flow;
u32 now = hhf_time_stamp();
if (!list_empty(head)) {
/* Find an expired heavy-hitter flow entry. */
list_for_each_entry(flow, head, flowchain) {
u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
if (hhf_time_before(prev, now))
return flow;
}
}
if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
q->hh_flows_overlimit++;
return NULL;
}
/* Create new entry. */
flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
if (!flow)
return NULL;
q->hh_flows_current_cnt++;
INIT_LIST_HEAD(&flow->flowchain);
list_add_tail(&flow->flowchain, head);
return flow;
}
/* Assigns packets to WDRR buckets. Implements a multi-stage filter to
* classify heavy-hitters.
*/
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
struct hhf_sched_data *q = qdisc_priv(sch);
u32 tmp_hash, hash;
u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
struct hh_flow_state *flow;
u32 pkt_len, min_hhf_val;
int i;
u32 prev;
u32 now = hhf_time_stamp();
/* Reset the HHF counter arrays if this is the right time. */
prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
if (hhf_time_before(prev, now)) {
for (i = 0; i < HHF_ARRAYS_CNT; i++)
bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
q->hhf_arrays_reset_timestamp = now;
}
/* Get hashed flow-id of the skb. */
hash = skb_get_hash_perturb(skb, &q->perturbation);
/* Check if this packet belongs to an already established HH flow. */
flow_pos = hash & HHF_BIT_MASK;
flow = seek_list(hash, &q->hh_flows[flow_pos], q);
if (flow) { /* found its HH flow */
flow->hit_timestamp = now;
return WDRR_BUCKET_FOR_HH;
}
/* Now pass the packet through the multi-stage filter. */
tmp_hash = hash;
xorsum = 0;
for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
/* Split the skb_hash into three 10-bit chunks. */
filter_pos[i] = tmp_hash & HHF_BIT_MASK;
xorsum ^= filter_pos[i];
tmp_hash >>= HHF_BIT_MASK_LEN;
}
/* The last chunk is computed as XOR sum of other chunks. */
filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;
pkt_len = qdisc_pkt_len(skb);
min_hhf_val = ~0U;
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
u32 val;
if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
q->hhf_arrays[i][filter_pos[i]] = 0;
__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
}
val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
if (min_hhf_val > val)
min_hhf_val = val;
}
/* Found a new HH iff all counter values > HH admit threshold. */
if (min_hhf_val > q->hhf_admit_bytes) {
/* Just captured a new heavy-hitter. */
flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
if (!flow) /* memory alloc problem */
return WDRR_BUCKET_FOR_NON_HH;
flow->hash_id = hash;
flow->hit_timestamp = now;
q->hh_flows_total_cnt++;
/* By returning without updating counters in q->hhf_arrays,
* we implicitly implement "shielding" (see Optimization O1).
*/
return WDRR_BUCKET_FOR_HH;
}
/* Conservative update of HHF arrays (see Optimization O2). */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
}
return WDRR_BUCKET_FOR_NON_HH;
}
/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
struct sk_buff *skb = bucket->head;
bucket->head = skb->next;
skb_mark_not_on_list(skb);
return skb;
}
/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
if (bucket->head == NULL)
bucket->head = skb;
else
bucket->tail->next = skb;
bucket->tail = skb;
skb->next = NULL;
}
static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct wdrr_bucket *bucket;
/* Always try to drop from heavy-hitters first. */
bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
if (!bucket->head)
bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];
if (bucket->head) {
struct sk_buff *skb = dequeue_head(bucket);
sch->q.qlen--;
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch, to_free);
}
/* Return id of the bucket from which the packet was dropped. */
return bucket - q->buckets;
}
static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket;
unsigned int prev_backlog;
idx = hhf_classify(skb, sch);
bucket = &q->buckets[idx];
bucket_add(bucket, skb);
qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&bucket->bucketchain)) {
unsigned int weight;
/* The logic of new_buckets vs. old_buckets is the same as
* new_flows vs. old_flows in the implementation of fq_codel,
* i.e., short bursts of non-HHs should have strict priority.
*/
if (idx == WDRR_BUCKET_FOR_HH) {
/* Always move heavy-hitters to old bucket. */
weight = 1;
list_add_tail(&bucket->bucketchain, &q->old_buckets);
} else {
weight = q->hhf_non_hh_weight;
list_add_tail(&bucket->bucketchain, &q->new_buckets);
}
bucket->deficit = weight * q->quantum;
}
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this
* bucket.
*/
if (hhf_drop(sch, to_free) == idx)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this. */
qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb = NULL;
struct wdrr_bucket *bucket;
struct list_head *head;
begin:
head = &q->new_buckets;
if (list_empty(head)) {
head = &q->old_buckets;
if (list_empty(head))
return NULL;
}
bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);
if (bucket->deficit <= 0) {
int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
1 : q->hhf_non_hh_weight;
bucket->deficit += weight * q->quantum;
list_move_tail(&bucket->bucketchain, &q->old_buckets);
goto begin;
}
if (bucket->head) {
skb = dequeue_head(bucket);
sch->q.qlen--;
qdisc_qstats_backlog_dec(sch, skb);
}
if (!skb) {
/* Force a pass through old_buckets to prevent starvation. */
if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
list_move_tail(&bucket->bucketchain, &q->old_buckets);
else
list_del_init(&bucket->bucketchain);
goto begin;
}
qdisc_bstats_update(sch, skb);
bucket->deficit -= qdisc_pkt_len(skb);
return skb;
}
static void hhf_reset(struct Qdisc *sch)
{
struct sk_buff *skb;
while ((skb = hhf_dequeue(sch)) != NULL)
rtnl_kfree_skbs(skb, skb);
}
static void hhf_destroy(struct Qdisc *sch)
{
int i;
struct hhf_sched_data *q = qdisc_priv(sch);
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
kvfree(q->hhf_arrays[i]);
kvfree(q->hhf_valid_bits[i]);
}
if (!q->hh_flows)
return;
for (i = 0; i < HH_FLOWS_CNT; i++) {
struct hh_flow_state *flow, *next;
struct list_head *head = &q->hh_flows[i];
if (list_empty(head))
continue;
list_for_each_entry_safe(flow, next, head, flowchain) {
list_del(&flow->flowchain);
kfree(flow);
}
}
kvfree(q->hh_flows);
}
static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
[TCA_HHF_BACKLOG_LIMIT] = { .type = NLA_U32 },
[TCA_HHF_QUANTUM] = { .type = NLA_U32 },
[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
[TCA_HHF_RESET_TIMEOUT] = { .type = NLA_U32 },
[TCA_HHF_ADMIT_BYTES] = { .type = NLA_U32 },
[TCA_HHF_EVICT_TIMEOUT] = { .type = NLA_U32 },
[TCA_HHF_NON_HH_WEIGHT] = { .type = NLA_U32 },
};
static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;
err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
NULL);
if (err < 0)
return err;
if (tb[TCA_HHF_QUANTUM])
new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
if (tb[TCA_HHF_NON_HH_WEIGHT])
new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
return -EINVAL;
sch_tree_lock(sch);
if (tb[TCA_HHF_BACKLOG_LIMIT])
sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
q->quantum = new_quantum;
q->hhf_non_hh_weight = new_hhf_non_hh_weight;
if (tb[TCA_HHF_HH_FLOWS_LIMIT])
q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);
if (tb[TCA_HHF_RESET_TIMEOUT]) {
u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);
q->hhf_reset_timeout = usecs_to_jiffies(us);
}
if (tb[TCA_HHF_ADMIT_BYTES])
q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);
if (tb[TCA_HHF_EVICT_TIMEOUT]) {
u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);
q->hhf_evict_timeout = usecs_to_jiffies(us);
}
qlen = sch->q.qlen;
prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);
rtnl_kfree_skbs(skb, skb);
}
qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
prev_backlog - sch->qstats.backlog);
sch_tree_unlock(sch);
return 0;
}
static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct hhf_sched_data *q = qdisc_priv(sch);
int i;
sch->limit = 1000;
q->quantum = psched_mtu(qdisc_dev(sch));
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
INIT_LIST_HEAD(&q->new_buckets);
INIT_LIST_HEAD(&q->old_buckets);
/* Configurable HHF parameters */
q->hhf_reset_timeout = HZ / 25; /* 40 ms */
q->hhf_admit_bytes = 131072; /* 128 KB */
q->hhf_evict_timeout = HZ; /* 1 sec */
q->hhf_non_hh_weight = 2;
if (opt) {
int err = hhf_change(sch, opt, extack);
if (err)
return err;
}
if (!q->hh_flows) {
/* Initialize heavy-hitter flow table. */
q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
GFP_KERNEL);
if (!q->hh_flows)
return -ENOMEM;
for (i = 0; i < HH_FLOWS_CNT; i++)
INIT_LIST_HEAD(&q->hh_flows[i]);
/* Cap max active HHs at twice len of hh_flows table. */
q->hh_flows_limit = 2 * HH_FLOWS_CNT;
q->hh_flows_overlimit = 0;
q->hh_flows_total_cnt = 0;
q->hh_flows_current_cnt = 0;
/* Initialize heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
sizeof(u32),
GFP_KERNEL);
if (!q->hhf_arrays[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
*/
return -ENOMEM;
}
}
q->hhf_arrays_reset_timestamp = hhf_time_stamp();
/* Initialize valid bits of heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
BITS_PER_BYTE, GFP_KERNEL);
if (!q->hhf_valid_bits[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
*/
return -ENOMEM;
}
}
/* Initialize Weighted DRR buckets. */
for (i = 0; i < WDRR_BUCKET_CNT; i++) {
struct wdrr_bucket *bucket = q->buckets + i;
INIT_LIST_HEAD(&bucket->bucketchain);
}
}
return 0;
}
static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *opts;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
jiffies_to_usecs(q->hhf_reset_timeout)) ||
nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
jiffies_to_usecs(q->hhf_evict_timeout)) ||
nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
return -1;
}
static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct tc_hhf_xstats st = {
.drop_overlimit = q->drop_overlimit,
.hh_overlimit = q->hh_flows_overlimit,
.hh_tot_count = q->hh_flows_total_cnt,
.hh_cur_count = q->hh_flows_current_cnt,
};
return gnet_stats_copy_app(d, &st, sizeof(st));
}
static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.id = "hhf",
.priv_size = sizeof(struct hhf_sched_data),
.enqueue = hhf_enqueue,
.dequeue = hhf_dequeue,
.peek = qdisc_peek_dequeued,
.init = hhf_init,
.reset = hhf_reset,
.destroy = hhf_destroy,
.change = hhf_change,
.dump = hhf_dump,
.dump_stats = hhf_dump_stats,
.owner = THIS_MODULE,
};
static int __init hhf_module_init(void)
{
return register_qdisc(&hhf_qdisc_ops);
}
static void __exit hhf_module_exit(void)
{
unregister_qdisc(&hhf_qdisc_ops);
}
module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
| linux-master | net/sched/sch_hhf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* net/sched/em_ipset.c ipset ematch
*
* Copyright (c) 2012 Florian Westphal <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/netfilter/xt_set.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/pkt_cls.h>
static int em_ipset_change(struct net *net, void *data, int data_len,
struct tcf_ematch *em)
{
struct xt_set_info *set = data;
ip_set_id_t index;
if (data_len != sizeof(*set))
return -EINVAL;
index = ip_set_nfnl_get_byindex(net, set->index);
if (index == IPSET_INVALID_ID)
return -ENOENT;
em->datalen = sizeof(*set);
em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
if (em->data)
return 0;
ip_set_nfnl_put(net, index);
return -ENOMEM;
}
static void em_ipset_destroy(struct tcf_ematch *em)
{
const struct xt_set_info *set = (const void *) em->data;
if (set) {
ip_set_nfnl_put(em->net, set->index);
kfree((void *) em->data);
}
}
static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct ip_set_adt_opt opt;
struct xt_action_param acpar;
const struct xt_set_info *set = (const void *) em->data;
struct net_device *dev, *indev = NULL;
struct nf_hook_state state = {
.net = em->net,
};
int ret, network_offset;
switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
state.pf = NFPROTO_IPV4;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
return 0;
acpar.thoff = ip_hdrlen(skb);
break;
case htons(ETH_P_IPV6):
state.pf = NFPROTO_IPV6;
if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
return 0;
/* doesn't call ipv6_find_hdr() because ipset doesn't use thoff, yet */
acpar.thoff = sizeof(struct ipv6hdr);
break;
default:
return 0;
}
opt.family = state.pf;
opt.dim = set->dim;
opt.flags = set->flags;
opt.cmdflags = 0;
opt.ext.timeout = ~0u;
network_offset = skb_network_offset(skb);
skb_pull(skb, network_offset);
dev = skb->dev;
rcu_read_lock();
if (skb->skb_iif)
indev = dev_get_by_index_rcu(em->net, skb->skb_iif);
state.in = indev ? indev : dev;
state.out = dev;
acpar.state = &state;
ret = ip_set_test(set->index, skb, &acpar, &opt);
rcu_read_unlock();
skb_push(skb, network_offset);
return ret;
}
static struct tcf_ematch_ops em_ipset_ops = {
.kind = TCF_EM_IPSET,
.change = em_ipset_change,
.destroy = em_ipset_destroy,
.match = em_ipset_match,
.owner = THIS_MODULE,
.link = LIST_HEAD_INIT(em_ipset_ops.link)
};
static int __init init_em_ipset(void)
{
return tcf_em_register(&em_ipset_ops);
}
static void __exit exit_em_ipset(void)
{
tcf_em_unregister(&em_ipset_ops);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("TC extended match for IP sets");
module_init(init_em_ipset);
module_exit(exit_em_ipset);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_IPSET);
| linux-master | net/sched/em_ipset.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/cls_route.c ROUTE4 classifier.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>
/*
* 1. For now we assume that route tags < 256.
* This allows using direct table lookups instead of hash tables.
* 2. For now we assume that "from TAG" and "fromdev DEV" statements
* are mutually exclusive.
* 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
*/
struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
struct route4_head {
struct route4_fastmap fastmap[16];
struct route4_bucket __rcu *table[256 + 1];
struct rcu_head rcu;
};
struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter __rcu *ht[16 + 16 + 1];
struct rcu_head rcu;
};
struct route4_filter {
struct route4_filter __rcu *next;
u32 id;
int iif;
struct tcf_result res;
struct tcf_exts exts;
u32 handle;
struct route4_bucket *bkt;
struct tcf_proto *tp;
struct rcu_work rwork;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static inline int route4_fastmap_hash(u32 id, int iif)
{
return id & 0xF;
}
static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
spin_lock_bh(&fastmap_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
spin_unlock_bh(&fastmap_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
/* fastmap updates must look atomic to align id, iif, filter */
spin_lock_bh(&fastmap_lock);
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
spin_unlock_bh(&fastmap_lock);
}
static inline int route4_hash_to(u32 id)
{
return id & 0xFF;
}
static inline int route4_hash_from(u32 id)
{
return (id >> 16) & 0xF;
}
static inline int route4_hash_iif(int iif)
{
return 16 + ((iif >> 16) & 0xF);
}
static inline int route4_hash_wild(void)
{
return 32;
}
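/* Note on the macro below: it is only meaningful when expanded inside the
* lookup loops of route4_classify(), where it relies on the locals head,
* id, iif, dont_cache and res being in scope, and on "continue" resuming
* the surrounding filter-chain walk when an action returns an error.
*/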
#define ROUTE4_APPLY_RESULT() \
{ \
*res = f->res; \
if (tcf_exts_has_actions(&f->exts)) { \
int r = tcf_exts_exec(skb, &f->exts, res); \
if (r < 0) { \
dont_cache = 1; \
continue; \
} \
return r; \
} else if (!dont_cache) \
route4_set_fastmap(head, id, iif, f); \
return 0; \
}
TC_INDIRECT_SCOPE int route4_classify(struct sk_buff *skb,
const struct tcf_proto *tp,
struct tcf_result *res)
{
struct route4_head *head = rcu_dereference_bh(tp->root);
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
dst = skb_dst(skb);
if (!dst)
goto failure;
id = dst->tclassid;
iif = inet_iif(skb);
h = route4_fastmap_hash(id, iif);
spin_lock(&fastmap_lock);
if (id == head->fastmap[h].id &&
iif == head->fastmap[h].iif &&
(f = head->fastmap[h].filter) != NULL) {
if (f == ROUTE4_FAILURE) {
spin_unlock(&fastmap_lock);
goto failure;
}
*res = f->res;
spin_unlock(&fastmap_lock);
return 0;
}
spin_unlock(&fastmap_lock);
h = route4_hash_to(id);
restart:
b = rcu_dereference_bh(head->table[h]);
if (b) {
for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
f;
f = rcu_dereference_bh(f->next))
if (f->id == id)
ROUTE4_APPLY_RESULT();
for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
f;
f = rcu_dereference_bh(f->next))
if (f->iif == iif)
ROUTE4_APPLY_RESULT();
for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
f;
f = rcu_dereference_bh(f->next))
ROUTE4_APPLY_RESULT();
}
if (h < 256) {
h = 256;
id &= ~0xFFFF;
goto restart;
}
if (!dont_cache)
route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
return -1;
}
static inline u32 to_hash(u32 id)
{
u32 h = id & 0xFF;
if (id & 0x8000)
h += 256;
return h;
}
static inline u32 from_hash(u32 id)
{
id &= 0xFFFF;
if (id == 0xFFFF)
return 32;
if (!(id & 0x8000)) {
if (id > 255)
return 256;
return id & 0xF;
}
return 16 + (id & 0xF);
}
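/* Worked example of the handle layout served by to_hash()/from_hash()
* (hypothetical handle, for illustration only): for handle 0x00038001,
* the low 16 bits 0x8001 give to_hash() = 0x01 + 256 = 257 (the 0x8000
* flag selects the upper half of head->table), and the high 16 bits
* 0x0003 give from_hash() = 3, so the filter lives in
* head->table[257]->ht[3]. A "from" part of 0xFFFF maps to the wildcard
* slot ht[32], while e.g. 0x8002 maps to the IIF slot 16 + 2 = 18.
*/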
static void *route4_get(struct tcf_proto *tp, u32 handle)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_bucket *b;
struct route4_filter *f;
unsigned int h1, h2;
h1 = to_hash(handle);
if (h1 > 256)
return NULL;
h2 = from_hash(handle >> 16);
if (h2 > 32)
return NULL;
b = rtnl_dereference(head->table[h1]);
if (b) {
for (f = rtnl_dereference(b->ht[h2]);
f;
f = rtnl_dereference(f->next))
if (f->handle == handle)
return f;
}
return NULL;
}
static int route4_init(struct tcf_proto *tp)
{
struct route4_head *head;
head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
if (head == NULL)
return -ENOBUFS;
rcu_assign_pointer(tp->root, head);
return 0;
}
static void __route4_delete_filter(struct route4_filter *f)
{
tcf_exts_destroy(&f->exts);
tcf_exts_put_net(&f->exts);
kfree(f);
}
static void route4_delete_filter_work(struct work_struct *work)
{
struct route4_filter *f = container_of(to_rcu_work(work),
struct route4_filter,
rwork);
rtnl_lock();
__route4_delete_filter(f);
rtnl_unlock();
}
static void route4_queue_work(struct route4_filter *f)
{
tcf_queue_work(&f->rwork, route4_delete_filter_work);
}
static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
if (head == NULL)
return;
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
b = rtnl_dereference(head->table[h1]);
if (b) {
for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
struct route4_filter *next;
next = rtnl_dereference(f->next);
RCU_INIT_POINTER(b->ht[h2], next);
tcf_unbind_filter(tp, &f->res);
if (tcf_exts_get_net(&f->exts))
route4_queue_work(f);
else
__route4_delete_filter(f);
}
}
RCU_INIT_POINTER(head->table[h1], NULL);
kfree_rcu(b, rcu);
}
}
kfree_rcu(head, rcu);
}
static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
bool rtnl_held, struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter *f = arg;
struct route4_filter __rcu **fp;
struct route4_filter *nf;
struct route4_bucket *b;
unsigned int h = 0;
int i, h1;
if (!head || !f)
return -EINVAL;
h = f->handle;
b = f->bkt;
fp = &b->ht[from_hash(h >> 16)];
for (nf = rtnl_dereference(*fp); nf;
fp = &nf->next, nf = rtnl_dereference(*fp)) {
if (nf == f) {
/* unlink it */
RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
/* Remove any fastmap lookups that might reference this filter;
* notice we unlinked the filter above, so it cannot get back
* into the fastmap.
*/
route4_reset_fastmap(head);
/* Delete it */
tcf_unbind_filter(tp, &f->res);
tcf_exts_get_net(&f->exts);
tcf_queue_work(&f->rwork, route4_delete_filter_work);
/* Strip RTNL protected tree */
for (i = 0; i <= 32; i++) {
struct route4_filter *rt;
rt = rtnl_dereference(b->ht[i]);
if (rt)
goto out;
}
/* OK, session has no flows */
RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
kfree_rcu(b, rcu);
break;
}
}
out:
*last = true;
for (h1 = 0; h1 <= 256; h1++) {
if (rcu_access_pointer(head->table[h1])) {
*last = false;
break;
}
}
return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
[TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
[TCA_ROUTE4_TO] = { .type = NLA_U32 },
[TCA_ROUTE4_FROM] = { .type = NLA_U32 },
[TCA_ROUTE4_IIF] = { .type = NLA_U32 },
};
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
unsigned long base, struct route4_filter *f,
u32 handle, struct route4_head *head,
struct nlattr **tb, struct nlattr *est, int new,
u32 flags, struct netlink_ext_ack *extack)
{
u32 id = 0, to = 0, nhandle = 0x8000;
struct route4_filter *fp;
unsigned int h1;
struct route4_bucket *b;
int err;
err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
if (err < 0)
return err;
if (tb[TCA_ROUTE4_TO]) {
if (new && handle & 0x8000)
return -EINVAL;
to = nla_get_u32(tb[TCA_ROUTE4_TO]);
if (to > 0xFF)
return -EINVAL;
nhandle = to;
}
if (tb[TCA_ROUTE4_FROM]) {
if (tb[TCA_ROUTE4_IIF])
return -EINVAL;
id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
if (id > 0xFF)
return -EINVAL;
nhandle |= id << 16;
} else if (tb[TCA_ROUTE4_IIF]) {
id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
if (id > 0x7FFF)
return -EINVAL;
nhandle |= (id | 0x8000) << 16;
} else
nhandle |= 0xFFFF << 16;
if (handle && new) {
nhandle |= handle & 0x7F00;
if (nhandle != handle)
return -EINVAL;
}
if (!nhandle) {
NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
return -EINVAL;
}
h1 = to_hash(nhandle);
b = rtnl_dereference(head->table[h1]);
if (!b) {
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
return -ENOBUFS;
rcu_assign_pointer(head->table[h1], b);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
for (fp = rtnl_dereference(b->ht[h2]);
fp;
fp = rtnl_dereference(fp->next))
if (fp->handle == f->handle)
return -EEXIST;
}
if (tb[TCA_ROUTE4_TO])
f->id = to;
if (tb[TCA_ROUTE4_FROM])
f->id = to | id<<16;
else if (tb[TCA_ROUTE4_IIF])
f->iif = id;
f->handle = nhandle;
f->bkt = b;
f->tp = tp;
if (tb[TCA_ROUTE4_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
tcf_bind_filter(tp, &f->res, base);
}
return 0;
}
static int route4_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, u32 flags,
struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
struct route4_filter *fold, *f1, *pfp, *f = NULL;
struct route4_bucket *b;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_ROUTE4_MAX + 1];
unsigned int h, th;
int err;
bool new = true;
if (!handle) {
NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
return -EINVAL;
}
if (opt == NULL)
return -EINVAL;
err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
route4_policy, NULL);
if (err < 0)
return err;
fold = *arg;
if (fold && fold->handle != handle)
return -EINVAL;
err = -ENOBUFS;
f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (!f)
goto errout;
err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
if (err < 0)
goto errout;
if (fold) {
f->id = fold->id;
f->iif = fold->iif;
f->handle = fold->handle;
f->tp = fold->tp;
f->bkt = fold->bkt;
new = false;
}
err = route4_set_parms(net, tp, base, f, handle, head, tb,
tca[TCA_RATE], new, flags, extack);
if (err < 0)
goto errout;
h = from_hash(f->handle >> 16);
fp = &f->bkt->ht[h];
for (pfp = rtnl_dereference(*fp);
(f1 = rtnl_dereference(*fp)) != NULL;
fp = &f1->next)
if (f->handle < f1->handle)
break;
tcf_block_netif_keep_dst(tp->chain->block);
rcu_assign_pointer(f->next, f1);
rcu_assign_pointer(*fp, f);
if (fold) {
th = to_hash(fold->handle);
h = from_hash(fold->handle >> 16);
b = rtnl_dereference(head->table[th]);
if (b) {
fp = &b->ht[h];
for (pfp = rtnl_dereference(*fp); pfp;
fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
if (pfp == fold) {
rcu_assign_pointer(*fp, fold->next);
break;
}
}
}
}
route4_reset_fastmap(head);
*arg = f;
if (fold) {
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
tcf_queue_work(&fold->rwork, route4_delete_filter_work);
}
return 0;
errout:
if (f)
tcf_exts_destroy(&f->exts);
kfree(f);
return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
struct route4_head *head = rtnl_dereference(tp->root);
unsigned int h, h1;
if (head == NULL || arg->stop)
return;
for (h = 0; h <= 256; h++) {
struct route4_bucket *b = rtnl_dereference(head->table[h]);
if (b) {
for (h1 = 0; h1 <= 32; h1++) {
struct route4_filter *f;
for (f = rtnl_dereference(b->ht[h1]);
f;
f = rtnl_dereference(f->next)) {
if (!tc_cls_stats_dump(tp, arg, f))
return;
}
}
}
}
}
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct route4_filter *f = fh;
struct nlattr *nest;
u32 id;
if (f == NULL)
return skb->len;
t->tcm_handle = f->handle;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (!(f->handle & 0x8000)) {
id = f->id & 0xFF;
if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
goto nla_put_failure;
}
if (f->handle & 0x80000000) {
if ((f->handle >> 16) != 0xFFFF &&
nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
goto nla_put_failure;
} else {
id = f->id >> 16;
if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
goto nla_put_failure;
}
if (f->res.classid &&
nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
unsigned long base)
{
struct route4_filter *f = fh;
tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.kind = "route",
.classify = route4_classify,
.init = route4_init,
.destroy = route4_destroy,
.get = route4_get,
.change = route4_change,
.delete = route4_delete,
.walk = route4_walk,
.dump = route4_dump,
.bind_class = route4_bind_class,
.owner = THIS_MODULE,
};
static int __init init_route4(void)
{
return register_tcf_proto_ops(&cls_route4_ops);
}
static void __exit exit_route4(void)
{
unregister_tcf_proto_ops(&cls_route4_ops);
}
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
| linux-master | net/sched/cls_route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/em_nbyte.c N-Byte ematch
*
* Authors: Thomas Graf <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h>
struct nbyte_data {
struct tcf_em_nbyte hdr;
char pattern[];
};
static int em_nbyte_change(struct net *net, void *data, int data_len,
struct tcf_ematch *em)
{
struct tcf_em_nbyte *nbyte = data;
if (data_len < sizeof(*nbyte) ||
data_len < (sizeof(*nbyte) + nbyte->len))
return -EINVAL;
em->datalen = sizeof(*nbyte) + nbyte->len;
em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
if (em->data == 0UL)
return -ENOMEM;
return 0;
}
static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct nbyte_data *nbyte = (struct nbyte_data *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
ptr += nbyte->hdr.off;
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
return 0;
return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
}
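/* Example: a two-byte pattern { 0x45, 0x00 } at offset 0 of the network
* layer (a hypothetical configuration, for illustration) matches IPv4
* packets whose first header byte is 0x45 (version 4, IHL 5) and whose
* TOS byte is zero.
*/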
static struct tcf_ematch_ops em_nbyte_ops = {
.kind = TCF_EM_NBYTE,
.change = em_nbyte_change,
.match = em_nbyte_match,
.owner = THIS_MODULE,
.link = LIST_HEAD_INIT(em_nbyte_ops.link)
};
static int __init init_em_nbyte(void)
{
return tcf_em_register(&em_nbyte_ops);
}
static void __exit exit_em_nbyte(void)
{
tcf_em_unregister(&em_nbyte_ops);
}
MODULE_LICENSE("GPL");
module_init(init_em_nbyte);
module_exit(exit_em_nbyte);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_NBYTE);
| linux-master | net/sched/em_nbyte.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/sch_api.c Packet scheduler API.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*
* Fixes:
*
* Rani Assaf <[email protected]> :980802: JIFFIES and CPU clock sources are repaired.
* Eduardo J. Blanco <[email protected]> :990222: kmod support
* Jamal Hadi Salim <[email protected]>: 990601: ingress support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>
#include <trace/events/qdisc.h>
/*
Short review.
-------------
This file consists of two interrelated parts:
1. queueing disciplines manager frontend.
2. traffic classes manager frontend.
Generally, a queueing discipline ("qdisc") is a black box,
which is able to enqueue packets and to dequeue them (when
the device is ready to send something) in the order and at the times
determined by the algorithm hidden in it.
qdiscs are divided into two categories:
- "queues", which have no internal structure visible from outside.
- "schedulers", which split all the packets into "traffic classes",
using "packet classifiers" (look at cls_api.c)
In turn, classes may have child qdiscs (as a rule, queues)
attached to them etc. etc. etc.
The goal of the routines in this file is to translate
the information supplied by the user in the form of handles
into a form more intelligible to the kernel, to perform some sanity
checks and the parts of the work which are common to all qdiscs,
and to provide rtnetlink notifications.
All the real intelligent work is done inside the qdisc modules.
Every discipline has two major routines: enqueue and dequeue.
---dequeue
dequeue usually returns an skb to send. It is allowed to return NULL,
but that does not mean the queue is empty; it just means that the
discipline does not want to send anything this time.
The queue is really empty if q->q.qlen == 0.
For complicated disciplines with multiple queues, q->q is not the
real packet queue, but q->q.qlen must still be valid.
---enqueue
enqueue returns 0 if the packet was enqueued successfully.
If the packet (this one or another one) was dropped, it returns
a non-zero error code.
NET_XMIT_DROP - this packet was dropped
Expected action: do not back off, but wait until the queue clears.
NET_XMIT_CN - probably this packet was enqueued, but another one was dropped.
Expected action: back off or ignore
Auxiliary routines:
---peek
like dequeue but without removing a packet from the queue
---reset
returns qdisc to initial state: purge all buffers, clear all
timers, counters (except for statistics) etc.
---init
initializes newly created qdisc.
---destroy
destroys resources allocated by init and during lifetime of qdisc.
---change
changes qdisc parameters.
*/
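/* A minimal, hypothetical pair of enqueue/dequeue routines illustrating
* the contract described above. This is a sketch only; it is not
* registered anywhere in this file, and it simply reuses the generic
* helpers from <net/sch_generic.h>.
*/
static __maybe_unused int example_enqueue(struct sk_buff *skb,
struct Qdisc *sch,
struct sk_buff **to_free)
{
if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch); /* NET_XMIT_SUCCESS */
return qdisc_drop(skb, sch, to_free); /* NET_XMIT_DROP */
}
static __maybe_unused struct sk_buff *example_dequeue(struct Qdisc *sch)
{
/* NULL here really means "empty": q.qlen is authoritative for this
* trivial single-queue discipline.
*/
return qdisc_dequeue_head(sch);
}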
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);
/************************************************
* Queueing disciplines manipulation. *
************************************************/
/* The list of all installed queueing disciplines. */
static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
int rc = -EEXIST;
write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (!strcmp(qops->id, q->id))
goto out;
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
if (qops->peek == NULL) {
if (qops->dequeue == NULL)
qops->peek = noop_qdisc_ops.peek;
else
goto out_einval;
}
if (qops->dequeue == NULL)
qops->dequeue = noop_qdisc_ops.dequeue;
if (qops->cl_ops) {
const struct Qdisc_class_ops *cops = qops->cl_ops;
if (!(cops->find && cops->walk && cops->leaf))
goto out_einval;
if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
goto out_einval;
}
qops->next = NULL;
*qp = qops;
rc = 0;
out:
write_unlock(&qdisc_mod_lock);
return rc;
out_einval:
rc = -EINVAL;
goto out;
}
EXPORT_SYMBOL(register_qdisc);
void unregister_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
int err = -ENOENT;
write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (q == qops)
break;
if (q) {
*qp = q->next;
q->next = NULL;
err = 0;
}
write_unlock(&qdisc_mod_lock);
WARN(err, "unregister qdisc(%s) failed\n", qops->id);
}
EXPORT_SYMBOL(unregister_qdisc);
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
read_lock(&qdisc_mod_lock);
strscpy(name, default_qdisc_ops->id, len);
read_unlock(&qdisc_mod_lock);
}
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
struct Qdisc_ops *q = NULL;
for (q = qdisc_base; q; q = q->next) {
if (!strcmp(name, q->id)) {
if (!try_module_get(q->owner))
q = NULL;
break;
}
}
return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
const struct Qdisc_ops *ops;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
request_module("sch_%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
}
if (ops) {
/* Set new default */
module_put(default_qdisc_ops->owner);
default_qdisc_ops = ops;
}
write_unlock(&qdisc_mod_lock);
return ops ? 0 : -ENOENT;
}
#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdisc's attached to device
* (root qdisc, all its children, children of children etc.)
* Note: caller either uses rtnl or rcu_read_lock()
*/
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
struct Qdisc *q;
if (!qdisc_dev(root))
return (root->handle == handle ? root : NULL);
if (!(root->flags & TCQ_F_BUILTIN) &&
root->handle == handle)
return root;
hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
lockdep_rtnl_is_held()) {
if (q->handle == handle)
return q;
}
return NULL;
}
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
if (invisible)
q->flags |= TCQ_F_INVISIBLE;
}
}
EXPORT_SYMBOL(qdisc_hash_add);
void qdisc_hash_del(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
ASSERT_RTNL();
hash_del_rcu(&q->hash);
}
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
if (!handle)
return NULL;
q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
if (q)
goto out;
if (dev_ingress_queue(dev))
q = qdisc_match_from_root(
rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
handle);
out:
return q;
}
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
struct netdev_queue *nq;
struct Qdisc *q;
if (!handle)
return NULL;
q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
if (q)
goto out;
nq = dev_ingress_queue_rcu(dev);
if (nq)
q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
handle);
out:
return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
const struct Qdisc_class_ops *cops = p->ops->cl_ops;
if (cops == NULL)
return NULL;
cl = cops->find(p, classid);
if (cl == 0)
return NULL;
return cops->leaf(p, cl);
}
/* Find queueing discipline by name */
static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
struct Qdisc_ops *q = NULL;
if (kind) {
read_lock(&qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) {
if (nla_strcmp(kind, q->id) == 0) {
if (!try_module_get(q->owner))
q = NULL;
break;
}
}
read_unlock(&qdisc_mod_lock);
}
return q;
}
/* The linklayer setting was not transferred from iproute2 in older
* versions, and the rate table lookup system has been dropped from
* the kernel. To stay backward compatible with older iproute2 tc
* utils, we detect the linklayer setting by checking whether the rate
* table was modified.
*
* For linklayer ATM table entries, the rate table will be aligned to
* 48 bytes, so some table entries will contain the same value. The
* mpu (min packet unit) is also encoded into the old rate table, so
* starting from the mpu, we find the low and high table entries for
* mapping this cell. If these entries contain the same value, then
* the rate table has been modified for linklayer ATM.
*
* This is done by rounding the mpu up to the nearest 48-byte cell/entry,
* then rounding up to the next cell, calculating the table entry one
* below, and comparing.
*/
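/* Worked example (illustrative values): with mpu = 0 and cell_log = 3,
* low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, giving
* cell_low = 0 and cell_high = (48 >> 3) - 1 = 5. On an ATM-adjusted
* table, packet sizes 0..47 all cost one 48-byte cell, so
* rtab[0] == rtab[5] and ATM is detected; on a plain Ethernet table
* those 8-byte cells hold different transmission times, so the two
* entries differ.
*/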
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
int low = roundup(r->mpu, 48);
int high = roundup(low+1, 48);
int cell_low = low >> r->cell_log;
int cell_high = (high >> r->cell_log) - 1;
/* rtab is too inaccurate at rates > 100Mbit/s */
if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
pr_debug("TC linklayer: Giving up ATM detection\n");
return TC_LINKLAYER_ETHERNET;
}
if ((cell_high > cell_low) && (cell_high < 256)
&& (rtab[cell_low] == rtab[cell_high])) {
pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
cell_low, cell_high, rtab[cell_high]);
return TC_LINKLAYER_ATM;
}
return TC_LINKLAYER_ETHERNET;
}
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct nlattr *tab,
struct netlink_ext_ack *extack)
{
struct qdisc_rate_table *rtab;
if (tab == NULL || r->rate == 0 ||
r->cell_log == 0 || r->cell_log >= 32 ||
nla_len(tab) != TC_RTAB_SIZE) {
NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
return NULL;
}
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
!memcmp(&rtab->data, nla_data(tab), 1024)) {
rtab->refcnt++;
return rtab;
}
}
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
if (r->linklayer == TC_LINKLAYER_UNAWARE)
r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
} else {
NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
}
return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
struct qdisc_rate_table *rtab, **rtabp;
if (!tab || --tab->refcnt)
return;
for (rtabp = &qdisc_rtab_list;
(rtab = *rtabp) != NULL;
rtabp = &rtab->next) {
if (rtab == tab) {
*rtabp = rtab->next;
kfree(rtab);
return;
}
}
}
EXPORT_SYMBOL(qdisc_put_rtab);
static LIST_HEAD(qdisc_stab_list);
static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
[TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_STAB_MAX + 1];
struct qdisc_size_table *stab;
struct tc_sizespec *s;
unsigned int tsize = 0;
u16 *tab = NULL;
int err;
err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
extack);
if (err < 0)
return ERR_PTR(err);
if (!tb[TCA_STAB_BASE]) {
NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
return ERR_PTR(-EINVAL);
}
s = nla_data(tb[TCA_STAB_BASE]);
if (s->tsize > 0) {
if (!tb[TCA_STAB_DATA]) {
NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
return ERR_PTR(-EINVAL);
}
tab = nla_data(tb[TCA_STAB_DATA]);
tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
}
if (tsize != s->tsize || (!tab && tsize > 0)) {
NL_SET_ERR_MSG(extack, "Invalid size of size table");
return ERR_PTR(-EINVAL);
}
list_for_each_entry(stab, &qdisc_stab_list, list) {
if (memcmp(&stab->szopts, s, sizeof(*s)))
continue;
if (tsize > 0 &&
memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
continue;
stab->refcnt++;
return stab;
}
if (s->size_log > STAB_SIZE_LOG_MAX ||
s->cell_log > STAB_SIZE_LOG_MAX) {
NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
return ERR_PTR(-EINVAL);
}
stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
if (!stab)
return ERR_PTR(-ENOMEM);
stab->refcnt = 1;
stab->szopts = *s;
if (tsize > 0)
memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
list_add_tail(&stab->list, &qdisc_stab_list);
return stab;
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
return;
if (--tab->refcnt == 0) {
list_del(&tab->list);
kfree_rcu(tab, rcu);
}
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, TCA_STAB);
if (nest == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
nla_put_failure:
return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab)
{
int pkt_len, slot;
pkt_len = skb->len + stab->szopts.overhead;
if (unlikely(!stab->szopts.tsize))
goto out;
slot = pkt_len + stab->szopts.cell_align;
if (unlikely(slot < 0))
slot = 0;
slot >>= stab->szopts.cell_log;
if (likely(slot < stab->szopts.tsize))
pkt_len = stab->data[slot];
else
pkt_len = stab->data[stab->szopts.tsize - 1] *
(slot / stab->szopts.tsize) +
stab->data[slot % stab->szopts.tsize];
pkt_len <<= stab->szopts.size_log;
out:
if (unlikely(pkt_len < 1))
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
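/* Worked example (illustrative size-table parameters): with overhead 0,
* cell_align -1, cell_log 6 (64-byte cells), size_log 0 and tsize 512,
* a 1500-byte skb yields slot = (1500 - 1) >> 6 = 23, so the qdisc-level
* packet length becomes stab->data[23].
*/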
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC;
}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
rcu_read_lock();
__netif_schedule(qdisc_root(wd->qdisc));
rcu_read_unlock();
return HRTIMER_NORESTART;
}
void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
clockid_t clockid)
{
hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
wd->timer.function = qdisc_watchdog;
wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
u64 delta_ns)
{
bool deactivated;
rcu_read_lock();
deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state);
rcu_read_unlock();
if (deactivated)
return;
if (hrtimer_is_queued(&wd->timer)) {
u64 softexpires;
softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
/* If timer is already set in [expires, expires + delta_ns],
* do not reprogram it.
*/
if (softexpires - expires <= delta_ns)
return;
}
hrtimer_start_range_ns(&wd->timer,
ns_to_ktime(expires),
delta_ns,
HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
struct hlist_head *h;
unsigned int i;
h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
if (h != NULL) {
for (i = 0; i < n; i++)
INIT_HLIST_HEAD(&h[i]);
}
return h;
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
struct Qdisc_class_common *cl;
struct hlist_node *next;
struct hlist_head *nhash, *ohash;
unsigned int nsize, nmask, osize;
unsigned int i, h;
/* Rehash when load factor exceeds 0.75 */
if (clhash->hashelems * 4 <= clhash->hashsize * 3)
return;
nsize = clhash->hashsize * 2;
nmask = nsize - 1;
nhash = qdisc_class_hash_alloc(nsize);
if (nhash == NULL)
return;
ohash = clhash->hash;
osize = clhash->hashsize;
sch_tree_lock(sch);
for (i = 0; i < osize; i++) {
hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
h = qdisc_class_hash(cl->classid, nmask);
hlist_add_head(&cl->hnode, &nhash[h]);
}
}
clhash->hash = nhash;
clhash->hashsize = nsize;
clhash->hashmask = nmask;
sch_tree_unlock(sch);
kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
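/* Worked example of the growth rule above: starting from the initial
* hashsize of 4 (see qdisc_class_hash_init() below), the table grows to
* 8 buckets once a 4th class has been inserted and the owning qdisc
* calls this helper, since 4 * 4 > 4 * 3.
*/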
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
unsigned int size = 4;
clhash->hash = qdisc_class_hash_alloc(size);
if (!clhash->hash)
return -ENOMEM;
clhash->hashsize = size;
clhash->hashmask = size - 1;
clhash->hashelems = 0;
return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
struct Qdisc_class_common *cl)
{
unsigned int h;
INIT_HLIST_NODE(&cl->hnode);
h = qdisc_class_hash(cl->classid, clhash->hashmask);
hlist_add_head(&cl->hnode, &clhash->hash[h]);
clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);
void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
struct Qdisc_class_common *cl)
{
hlist_del(&cl->hnode);
clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from the space managed by the kernel.
* Possible range is [8000-FFFF]:0000 (0x8000 values)
*/
static u32 qdisc_alloc_handle(struct net_device *dev)
{
int i = 0x8000;
static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
do {
autohandle += TC_H_MAKE(0x10000U, 0);
if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
autohandle = TC_H_MAKE(0x80000000U, 0);
if (!qdisc_lookup(dev, autohandle))
return autohandle;
cond_resched();
} while (--i > 0);
return 0;
}
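/* For illustration: qdisc_alloc_handle() above hands out 0x80010000,
* 0x80020000, ... in turn, i.e. "8001:", "8002:" in tc's major:minor
* notation, skipping any handle already in use on the device.
*/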
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
bool notify;
int drops;
if (n == 0 && len == 0)
return;
drops = max_t(int, n, 0);
rcu_read_lock();
while ((parentid = sch->parent)) {
if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
break;
if (sch->flags & TCQ_F_NOPARENT)
break;
/* Notify parent qdisc only if child qdisc becomes empty.
*
* If child was empty even before update then backlog
* counter is screwed and we skip notification because
* parent class is already passive.
*
* If the original child was offloaded then it is allowed
* to be seen as empty, so the parent is notified anyway.
*/
notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
!qdisc_is_offloaded);
/* TODO: perform the search on a per txq basis */
sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
WARN_ON_ONCE(parentid != TC_H_ROOT);
break;
}
cops = sch->ops->cl_ops;
if (notify && cops->qlen_notify) {
cl = cops->find(sch, parentid);
cops->qlen_notify(sch, cl);
}
sch->q.qlen -= n;
sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
void *type_data)
{
struct net_device *dev = qdisc_dev(sch);
int err;
sch->flags &= ~TCQ_F_OFFLOADED;
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return 0;
err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
if (err == -EOPNOTSUPP)
return 0;
if (!err)
sch->flags |= TCQ_F_OFFLOADED;
return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
struct Qdisc *new, struct Qdisc *old,
enum tc_setup_type type, void *type_data,
struct netlink_ext_ack *extack)
{
bool any_qdisc_is_offloaded;
int err;
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return;
err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
/* Don't report error if the graft is part of destroy operation. */
if (!err || !new || new == &noop_qdisc)
return;
/* Don't report error if the parent, the old child and the new
* one are not offloaded.
*/
any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
if (any_qdisc_is_offloaded)
NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
void qdisc_offload_query_caps(struct net_device *dev,
enum tc_setup_type type,
void *caps, size_t caps_len)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct tc_query_caps_base base = {
.type = type,
.caps = caps,
};
memset(caps, 0, caps_len);
if (ops->ndo_setup_tc)
ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
}
EXPORT_SYMBOL(qdisc_offload_query_caps);
static void qdisc_offload_graft_root(struct net_device *dev,
struct Qdisc *new, struct Qdisc *old,
struct netlink_ext_ack *extack)
{
struct tc_root_qopt_offload graft_offload = {
.command = TC_ROOT_GRAFT,
.handle = new ? new->handle : 0,
.ingress = (new && new->flags & TCQ_F_INGRESS) ||
(old && old->flags & TCQ_F_INGRESS),
};
qdisc_offload_graft_helper(dev, NULL, new, old,
TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event,
struct netlink_ext_ack *extack)
{
struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
struct qdisc_size_table *stab;
u32 block_index;
__u32 qlen;
cond_resched();
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
goto out_nlmsg_trim;
tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = refcount_read(&q->refcnt);
if (nla_put_string(skb, TCA_KIND, q->ops->id))
goto nla_put_failure;
if (q->ops->ingress_block_get) {
block_index = q->ops->ingress_block_get(q);
if (block_index &&
nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
goto nla_put_failure;
}
if (q->ops->egress_block_get) {
block_index = q->ops->egress_block_get(q);
if (block_index &&
nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
goto nla_put_failure;
}
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto nla_put_failure;
if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
goto nla_put_failure;
qlen = qdisc_qlen_sum(q);
stab = rtnl_dereference(q->stab);
if (stab && qdisc_dump_stab(skb, stab) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
NULL, &d, TCA_PAD) < 0)
goto nla_put_failure;
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure;
if (qdisc_is_percpu_stats(q)) {
cpu_bstats = q->cpu_bstats;
cpu_qstats = q->cpu_qstats;
}
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
if (gnet_stats_finish_copy(&d) < 0)
goto nla_put_failure;
if (extack && extack->_msg &&
nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
goto out_nlmsg_trim;
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
if (q->flags & TCQ_F_BUILTIN)
return true;
if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
return true;
return false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new,
struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
if (old && !tc_qdisc_dump_ignore(old, false)) {
if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
0, RTM_DELQDISC, extack) < 0)
goto err_out;
}
if (new && !tc_qdisc_dump_ignore(new, false)) {
if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
goto err_out;
}
if (skb->len)
return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
err_out:
kfree_skb(skb);
return -EINVAL;
}
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new,
struct netlink_ext_ack *extack)
{
if (new || old)
qdisc_notify(net, skb, n, clid, old, new, extack);
if (old)
qdisc_put(old);
}
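/* Downgrade a lockless qdisc so it can live under a non-lockless parent:
 * clear TCQ_F_NOLOCK and release the per-CPU stats that only lockless
 * qdiscs use.
 */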
static void qdisc_clear_nolock(struct Qdisc *sch)
{
sch->flags &= ~TCQ_F_NOLOCK;
if (!(sch->flags & TCQ_F_CPUSTATS))
return;
free_percpu(sch->cpu_bstats);
free_percpu(sch->cpu_qstats);
sch->cpu_bstats = NULL;
sch->cpu_qstats = NULL;
sch->flags &= ~TCQ_F_CPUSTATS;
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
* to device "dev".
*
* When appropriate, send a netlink notification using "skb"
* and "n".
*
* On success, destroy old qdisc.
*/
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
struct Qdisc *new, struct Qdisc *old,
struct netlink_ext_ack *extack)
{
struct Qdisc *q = old;
struct net *net = dev_net(dev);
if (parent == NULL) {
unsigned int i, num_q, ingress;
struct netdev_queue *dev_queue;
ingress = 0;
num_q = dev->num_tx_queues;
if ((q && q->flags & TCQ_F_INGRESS) ||
(new && new->flags & TCQ_F_INGRESS)) {
ingress = 1;
dev_queue = dev_ingress_queue(dev);
if (!dev_queue) {
NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
return -ENOENT;
}
q = rtnl_dereference(dev_queue->qdisc_sleeping);
/* This is the counterpart of that qdisc_refcount_inc_nz() call in
* __tcf_qdisc_find() for filter requests.
*/
if (!qdisc_refcount_dec_if_one(q)) {
NL_SET_ERR_MSG(extack,
"Current ingress or clsact Qdisc has ongoing filter requests");
return -EBUSY;
}
}
if (dev->flags & IFF_UP)
dev_deactivate(dev);
qdisc_offload_graft_root(dev, new, old, extack);
if (new && new->ops->attach && !ingress)
goto skip;
if (!ingress) {
for (i = 0; i < num_q; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
old = dev_graft_qdisc(dev_queue, new);
if (new && i > 0)
qdisc_refcount_inc(new);
qdisc_put(old);
}
} else {
old = dev_graft_qdisc(dev_queue, NULL);
/* {ingress,clsact}_destroy() @old before grafting @new to avoid
* unprotected concurrent accesses to net_device::miniq_{in,e}gress
* pointer(s) in mini_qdisc_pair_swap().
*/
qdisc_notify(net, skb, n, classid, old, new, extack);
qdisc_destroy(old);
dev_graft_qdisc(dev_queue, new);
}
skip:
if (!ingress) {
old = rtnl_dereference(dev->qdisc);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
notify_and_destroy(net, skb, n, classid, old, new, extack);
if (new && new->ops->attach)
new->ops->attach(new);
}
if (dev->flags & IFF_UP)
dev_activate(dev);
} else {
const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
unsigned long cl;
int err;
/* Only support running class lockless if parent is lockless */
if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
qdisc_clear_nolock(new);
if (!cops || !cops->graft)
return -EOPNOTSUPP;
cl = cops->find(parent, classid);
if (!cl) {
NL_SET_ERR_MSG(extack, "Specified class not found");
return -ENOENT;
}
if (new && new->ops == &noqueue_qdisc_ops) {
NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
return -EINVAL;
}
err = cops->graft(parent, cl, new, &old, extack);
if (err)
return err;
notify_and_destroy(net, skb, n, classid, old, new, extack);
}
return 0;
}
static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
struct netlink_ext_ack *extack)
{
u32 block_index;
if (tca[TCA_INGRESS_BLOCK]) {
block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
if (!block_index) {
NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
return -EINVAL;
}
if (!sch->ops->ingress_block_set) {
NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
return -EOPNOTSUPP;
}
sch->ops->ingress_block_set(sch, block_index);
}
if (tca[TCA_EGRESS_BLOCK]) {
block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
if (!block_index) {
NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
return -EINVAL;
}
if (!sch->ops->egress_block_set) {
NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
return -EOPNOTSUPP;
}
sch->ops->egress_block_set(sch, block_index);
}
return 0;
}
/*
Allocate and initialize new qdisc.
Parameters are passed via opt.
*/
static struct Qdisc *qdisc_create(struct net_device *dev,
struct netdev_queue *dev_queue,
u32 parent, u32 handle,
struct nlattr **tca, int *errp,
struct netlink_ext_ack *extack)
{
int err;
struct nlattr *kind = tca[TCA_KIND];
struct Qdisc *sch;
struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
if (ops == NULL && kind != NULL) {
char name[IFNAMSIZ];
if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
/* We dropped the RTNL semaphore in order to
* perform the module load. So, even if we
* succeeded in loading the module we have to
* tell the caller to replay the request. We
* indicate this using -EAGAIN.
* We replay the request because the device may
* go away in the meantime.
*/
rtnl_unlock();
request_module("sch_%s", name);
rtnl_lock();
ops = qdisc_lookup_ops(kind);
if (ops != NULL) {
/* qdisc_lookup_ops() will be called again on replay,
* so don't keep a reference.
*/
module_put(ops->owner);
err = -EAGAIN;
goto err_out;
}
}
}
#endif
err = -ENOENT;
if (!ops) {
NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
goto err_out;
}
sch = qdisc_alloc(dev_queue, ops, extack);
if (IS_ERR(sch)) {
err = PTR_ERR(sch);
goto err_out2;
}
sch->parent = parent;
if (handle == TC_H_INGRESS) {
if (!(sch->flags & TCQ_F_INGRESS)) {
NL_SET_ERR_MSG(extack,
"Specified parent ID is reserved for ingress and clsact Qdiscs");
err = -EINVAL;
goto err_out3;
}
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
if (handle == 0) {
NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
err = -ENOSPC;
goto err_out3;
}
}
if (!netif_is_multiqueue(dev))
sch->flags |= TCQ_F_ONETXQUEUE;
}
sch->handle = handle;
/* This exists to preserve backward compatibility with a userspace
* loophole that allowed userspace to get the IFF_NO_QUEUE facility
* on older kernels by setting tx_queue_len=0 (prior to qdisc init),
* and then forgetting to reinitialize tx_queue_len before attaching
* a qdisc again.
*/
if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
}
err = qdisc_block_indexes_set(sch, tca, extack);
if (err)
goto err_out3;
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB], extack);
if (IS_ERR(stab)) {
err = PTR_ERR(stab);
goto err_out3;
}
rcu_assign_pointer(sch->stab, stab);
}
if (ops->init) {
err = ops->init(sch, tca[TCA_OPTIONS], extack);
if (err != 0)
goto err_out4;
}
if (tca[TCA_RATE]) {
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT) {
NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
goto err_out4;
}
err = gen_new_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
NULL,
true,
tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
goto err_out4;
}
}
qdisc_hash_add(sch, false);
trace_qdisc_create(ops, dev, parent);
return sch;
err_out4:
/* Even if ops->init() failed, we call ops->destroy()
* like qdisc_create_dflt().
*/
if (ops->destroy)
ops->destroy(sch);
qdisc_put_stab(rtnl_dereference(sch->stab));
err_out3:
netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
module_put(ops->owner);
err_out:
*errp = err;
return NULL;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
struct netlink_ext_ack *extack)
{
struct qdisc_size_table *ostab, *stab = NULL;
int err = 0;
if (tca[TCA_OPTIONS]) {
if (!sch->ops->change) {
NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
return -EINVAL;
}
if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
return -EOPNOTSUPP;
}
err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
if (err)
return err;
}
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB], extack);
if (IS_ERR(stab))
return PTR_ERR(stab);
}
ostab = rtnl_dereference(sch->stab);
rcu_assign_pointer(sch->stab, stab);
qdisc_put_stab(ostab);
if (tca[TCA_RATE]) {
/* NB: errors from gen_replace_estimator() are ignored
because the change can't be undone. */
if (sch->flags & TCQ_F_MQROOT)
goto out;
gen_replace_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
NULL,
true,
tca[TCA_RATE]);
}
out:
return 0;
}
struct check_loop_arg {
struct qdisc_walker w;
struct Qdisc *p;
int depth;
};
static int check_loop_fn(struct Qdisc *q, unsigned long cl,
struct qdisc_walker *w);
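/* Walk the class tree under q and return -ELOOP if p is found beneath it
 * (grafting q under p would then create a cycle) or if the tree nests
 * too deeply (depth > 7).
 */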
static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
struct check_loop_arg arg;
if (q->ops->cl_ops == NULL)
return 0;
arg.w.stop = arg.w.skip = arg.w.count = 0;
arg.w.fn = check_loop_fn;
arg.depth = depth;
arg.p = p;
q->ops->cl_ops->walk(q, &arg.w);
return arg.w.stop ? -ELOOP : 0;
}
static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
struct Qdisc *leaf;
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
struct check_loop_arg *arg = (struct check_loop_arg *)w;
leaf = cops->leaf(q, cl);
if (leaf) {
if (leaf == arg->p || arg->depth > 7)
return -ELOOP;
return check_loop(leaf, arg->p, arg->depth + 1);
}
return 0;
}
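/* Netlink attribute policy shared by the qdisc and traffic class
 * request handlers below.
 */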
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
[TCA_KIND] = { .type = NLA_STRING },
[TCA_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct tc_estimator) },
[TCA_STAB] = { .type = NLA_NESTED },
[TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
[TCA_CHAIN] = { .type = NLA_U32 },
[TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
[TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
};
/*
* Delete/get qdisc.
*/
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid;
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
int err;
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
clid = tcm->tcm_parent;
if (clid) {
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p) {
NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
return -ENOENT;
}
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue(dev)) {
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
} else {
q = rtnl_dereference(dev->qdisc);
}
if (!q) {
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
return -ENOENT;
}
if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
NL_SET_ERR_MSG(extack, "Invalid handle");
return -EINVAL;
}
} else {
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q) {
NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
return -ENOENT;
}
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
}
if (n->nlmsg_type == RTM_DELQDISC) {
if (!clid) {
NL_SET_ERR_MSG(extack, "Classid cannot be zero");
return -EINVAL;
}
if (q->handle == 0) {
NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
return -ENOENT;
}
err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
if (err != 0)
return err;
} else {
qdisc_notify(net, skb, n, clid, NULL, q, NULL);
}
return 0;
}
static bool req_create_or_replace(struct nlmsghdr *n)
{
return (n->nlmsg_flags & NLM_F_CREATE &&
n->nlmsg_flags & NLM_F_REPLACE);
}
static bool req_create_exclusive(struct nlmsghdr *n)
{
return (n->nlmsg_flags & NLM_F_CREATE &&
n->nlmsg_flags & NLM_F_EXCL);
}
static bool req_change(struct nlmsghdr *n)
{
return (!(n->nlmsg_flags & NLM_F_CREATE) &&
!(n->nlmsg_flags & NLM_F_REPLACE) &&
!(n->nlmsg_flags & NLM_F_EXCL));
}
/*
* Create/change qdisc.
*/
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm;
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid;
struct Qdisc *q, *p;
int err;
replay:
/* Reinit, just in case something touches this. */
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
q = p = NULL;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p) {
NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
return -ENOENT;
}
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue_create(dev)) {
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
} else {
q = rtnl_dereference(dev->qdisc);
}
/* It may be the default qdisc; ignore it */
if (q && q->handle == 0)
q = NULL;
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
return -EEXIST;
}
if (TC_H_MIN(tcm->tcm_handle)) {
NL_SET_ERR_MSG(extack, "Invalid minor handle");
return -EINVAL;
}
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
return -EEXIST;
}
if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
}
if (q->flags & TCQ_F_INGRESS) {
NL_SET_ERR_MSG(extack,
"Cannot regraft ingress or clsact Qdiscs");
return -EINVAL;
}
if (q == p ||
(p && check_loop(q, p, 0))) {
NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
return -ELOOP;
}
if (clid == TC_H_INGRESS) {
NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
return -EINVAL;
}
qdisc_refcount_inc(q);
goto graft;
} else {
if (!q)
goto create_n_graft;
/* This magic test requires explanation.
*
* We know that some child qdisc is already
* attached to this parent and we have a choice:
* 1) change it, or 2) create/graft a new one.
* If the requested qdisc kind is different
* from the existing one, then we choose graft.
* If they are the same then this is a "change"
* operation - just let it fall through.
*
* 1. We are allowed to create/graft only
* if the request explicitly states
* "please create if it doesn't exist".
*
* 2. If the request is an exclusive create,
* then the qdisc tcm_handle is not expected
* to exist, so we choose create/graft too.
*
* 3. The last case is when no flags are set.
* This happens when, for example, the tc
* utility issues a "change" command.
* Alas, this is a sort of hole in the API; we
* cannot decide what to do unambiguously.
* For now we select create/graft.
*/
if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
if (req_create_or_replace(n) ||
req_create_exclusive(n))
goto create_n_graft;
else if (req_change(n))
goto create_n_graft2;
}
}
}
} else {
if (!tcm->tcm_handle) {
NL_SET_ERR_MSG(extack, "Handle cannot be zero");
return -EINVAL;
}
q = qdisc_lookup(dev, tcm->tcm_handle);
}
/* Change qdisc parameters */
if (!q) {
NL_SET_ERR_MSG(extack, "Specified qdisc not found");
return -ENOENT;
}
if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
return -EEXIST;
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
}
err = qdisc_change(q, tca, extack);
if (err == 0)
qdisc_notify(net, skb, n, clid, NULL, q, extack);
return err;
create_n_graft:
if (!(n->nlmsg_flags & NLM_F_CREATE)) {
NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
return -ENOENT;
}
create_n_graft2:
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev)) {
q = qdisc_create(dev, dev_ingress_queue(dev),
tcm->tcm_parent, tcm->tcm_parent,
tca, &err, extack);
} else {
NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
err = -ENOENT;
}
} else {
struct netdev_queue *dev_queue;
if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
dev_queue = p->ops->cl_ops->select_queue(p, tcm);
else if (p)
dev_queue = p->dev_queue;
else
dev_queue = netdev_get_tx_queue(dev, 0);
q = qdisc_create(dev, dev_queue,
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
if (q == NULL) {
if (err == -EAGAIN)
goto replay;
return err;
}
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
if (err) {
if (q)
qdisc_put(q);
return err;
}
return 0;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
struct netlink_callback *cb,
int *q_idx_p, int s_q_idx, bool recur,
bool dump_invisible)
{
int ret = 0, q_idx = *q_idx_p;
struct Qdisc *q;
int b;
if (!root)
return 0;
q = root;
if (q_idx < s_q_idx) {
q_idx++;
} else {
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC, NULL) <= 0)
goto done;
q_idx++;
}
/* If dumping singletons, there is no qdisc_dev(root) and the singleton
* itself has already been dumped.
*
* If we've already dumped the top-level (ingress) qdisc above, we don't
* want to hit it again when walking the global qdisc hashtable.
*/
if (!qdisc_dev(root) || !recur)
goto out;
hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
if (q_idx < s_q_idx) {
q_idx++;
continue;
}
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC, NULL) <= 0)
goto done;
q_idx++;
}
out:
*q_idx_p = q_idx;
return ret;
done:
ret = -1;
goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int idx, q_idx;
int s_idx, s_q_idx;
struct net_device *dev;
const struct nlmsghdr *nlh = cb->nlh;
struct nlattr *tca[TCA_MAX + 1];
int err;
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
idx = 0;
ASSERT_RTNL();
err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
rtm_tca_policy, cb->extack);
if (err < 0)
return err;
for_each_netdev(net, dev) {
struct netdev_queue *dev_queue;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
s_q_idx = 0;
q_idx = 0;
if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
skb, cb, &q_idx, s_q_idx,
true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
skb, cb, &q_idx, s_q_idx, false,
tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
cont:
idx++;
}
done:
cb->args[0] = idx;
cb->args[1] = q_idx;
return skb->len;
}
/************************************************
* Traffic classes manipulation. *
************************************************/
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
unsigned long cl, u32 portid, u32 seq, u16 flags,
int event, struct netlink_ext_ack *extack)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
cond_resched();
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
goto out_nlmsg_trim;
tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
tcm->tcm_info = 0;
if (nla_put_string(skb, TCA_KIND, q->ops->id))
goto nla_put_failure;
if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
NULL, &d, TCA_PAD) < 0)
goto nla_put_failure;
if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
goto nla_put_failure;
if (gnet_stats_finish_copy(&d) < 0)
goto nla_put_failure;
if (extack && extack->_msg &&
nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
goto out_nlmsg_trim;
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct Qdisc *q,
unsigned long cl, int event, struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
kfree_skb(skb);
return -EINVAL;
}
return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
}
static int tclass_del_notify(struct net *net,
const struct Qdisc_class_ops *cops,
struct sk_buff *oskb, struct nlmsghdr *n,
struct Qdisc *q, unsigned long cl,
struct netlink_ext_ack *extack)
{
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
struct sk_buff *skb;
int err = 0;
if (!cops->delete)
return -EOPNOTSUPP;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
RTM_DELTCLASS, extack) < 0) {
kfree_skb(skb);
return -EINVAL;
}
err = cops->delete(q, cl, extack);
if (err) {
kfree_skb(skb);
return err;
}
err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
return err;
}
#ifdef CONFIG_NET_CLS
struct tcf_bind_args {
struct tcf_walker w;
unsigned long base;
unsigned long cl;
u32 classid;
};
static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
struct tcf_bind_args *a = (void *)arg;
if (n && tp->ops->bind_class) {
struct Qdisc *q = tcf_block_q(tp->chain->block);
sch_tree_lock(q);
tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
sch_tree_unlock(q);
}
return 0;
}
struct tc_bind_class_args {
struct qdisc_walker w;
unsigned long new_cl;
u32 portid;
u32 clid;
};
static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
struct qdisc_walker *w)
{
struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
struct tcf_block *block;
struct tcf_chain *chain;
block = cops->tcf_block(q, cl, NULL);
if (!block)
return 0;
for (chain = tcf_get_next_chain(block, NULL);
chain;
chain = tcf_get_next_chain(block, chain)) {
struct tcf_proto *tp;
for (tp = tcf_get_next_proto(chain, NULL);
tp; tp = tcf_get_next_proto(chain, tp)) {
struct tcf_bind_args arg = {};
arg.w.fn = tcf_node_bind;
arg.classid = a->clid;
arg.base = cl;
arg.cl = a->new_cl;
tp->ops->walk(tp, &arg.w, true);
}
}
return 0;
}
static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
unsigned long new_cl)
{
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
struct tc_bind_class_args args = {};
if (!cops->tcf_block)
return;
args.portid = portid;
args.clid = clid;
args.new_cl = new_cl;
args.w.fn = tc_bind_class_walker;
q->ops->cl_ops->walk(q, &args.w);
}
#else
static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
unsigned long new_cl)
{
}
#endif
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct Qdisc *q = NULL;
const struct Qdisc_class_ops *cops;
unsigned long cl = 0;
unsigned long new_cl;
u32 portid;
u32 clid;
u32 qid;
int err;
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
/*
parent == TC_H_UNSPEC - unspecified parent.
parent == TC_H_ROOT - class is root, which has no parent.
parent == X:0 - parent is root class.
parent == X:Y - parent is a node in hierarchy.
parent == 0:Y - parent is X:Y, where X:0 is qdisc.
handle == 0:0 - generate handle from kernel pool.
handle == 0:Y - class is X:Y, where X:0 is qdisc.
handle == X:Y - class is X:Y.
handle == X:0 - root class.
*/
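/* Illustrative example: "tc class add dev eth0 parent 1: classid 1:10 ..."
 * arrives with tcm_parent == 1:0 and tcm_handle == 1:10; below, qid
 * resolves to the 1:0 major and clid to 1:10.
 */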
/* Step 1. Determine qdisc handle X:0 */
portid = tcm->tcm_parent;
clid = tcm->tcm_handle;
qid = TC_H_MAJ(clid);
if (portid != TC_H_ROOT) {
u32 qid1 = TC_H_MAJ(portid);
if (qid && qid1) {
/* If both majors are known, they must be identical. */
if (qid != qid1)
return -EINVAL;
} else if (qid1) {
qid = qid1;
} else if (qid == 0) {
qid = rtnl_dereference(dev->qdisc)->handle;
}
/* Now qid is a genuine qdisc handle consistent
* with both parent and child.
*
* TC_H_MAJ(portid) may still be unspecified; complete it now.
*/
if (portid)
portid = TC_H_MAKE(qid, portid);
} else {
if (qid == 0)
qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
q = qdisc_lookup(dev, qid);
if (!q)
return -ENOENT;
/* And check that it supports classes */
cops = q->ops->cl_ops;
if (cops == NULL)
return -EINVAL;
/* Now try to get class */
if (clid == 0) {
if (portid == TC_H_ROOT)
clid = qid;
} else
clid = TC_H_MAKE(qid, clid);
if (clid)
cl = cops->find(q, clid);
if (cl == 0) {
err = -ENOENT;
if (n->nlmsg_type != RTM_NEWTCLASS ||
!(n->nlmsg_flags & NLM_F_CREATE))
goto out;
} else {
switch (n->nlmsg_type) {
case RTM_NEWTCLASS:
err = -EEXIST;
if (n->nlmsg_flags & NLM_F_EXCL)
goto out;
break;
case RTM_DELTCLASS:
err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
/* Unbind the class from its filters by rebinding them to classid 0 */
tc_bind_tclass(q, portid, clid, 0);
goto out;
case RTM_GETTCLASS:
err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
goto out;
default:
err = -EINVAL;
goto out;
}
}
if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
return -EOPNOTSUPP;
}
new_cl = cl;
err = -EOPNOTSUPP;
if (cops->change)
err = cops->change(q, clid, portid, tca, &new_cl, extack);
if (err == 0) {
tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
/* We just created a new class; we need to do the reverse binding. */
if (cl != new_cl)
tc_bind_tclass(q, portid, clid, new_cl);
}
out:
return err;
}
struct qdisc_dump_args {
struct qdisc_walker w;
struct sk_buff *skb;
struct netlink_callback *cb;
};
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
struct qdisc_walker *arg)
{
struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWTCLASS, NULL);
}
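/* Dump the classes of a single qdisc. *t_p counts the qdiscs visited so
 * far and s_t is where a previously interrupted dump stopped; cb->args[1]
 * carries the class position inside the qdisc being resumed.
 */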
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
struct tcmsg *tcm, struct netlink_callback *cb,
int *t_p, int s_t)
{
struct qdisc_dump_args arg;
if (tc_qdisc_dump_ignore(q, false) ||
*t_p < s_t || !q->ops->cl_ops ||
(tcm->tcm_parent &&
TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
(*t_p)++;
return 0;
}
if (*t_p > s_t)
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
arg.w.fn = qdisc_class_dump;
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
arg.w.skip = cb->args[1];
arg.w.count = 0;
q->ops->cl_ops->walk(q, &arg.w);
cb->args[1] = arg.w.count;
if (arg.w.stop)
return -1;
(*t_p)++;
return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
struct tcmsg *tcm, struct netlink_callback *cb,
int *t_p, int s_t, bool recur)
{
struct Qdisc *q;
int b;
if (!root)
return 0;
if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
return -1;
if (!qdisc_dev(root) || !recur)
return 0;
if (tcm->tcm_parent) {
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
if (q && q != root &&
tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
return 0;
}
hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
}
return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
struct tcmsg *tcm = nlmsg_data(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
int t, s_t;
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return 0;
dev = dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return 0;
s_t = cb->args[0];
t = 0;
if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
skb, tcm, cb, &t, s_t, false) < 0)
goto done;
done:
cb->args[0] = t;
dev_put(dev);
return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%08x %08x %08x %08x\n",
(u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1000000,
(u32)NSEC_PER_SEC / hrtimer_resolution);
return 0;
}
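/* Example output, assuming high-resolution timers (hrtimer_resolution == 1):
 * "000003e8 00000040 000f4240 3b9aca00"
 */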
static int __net_init psched_net_init(struct net *net)
{
struct proc_dir_entry *e;
e = proc_create_single("psched", 0, net->proc_net, psched_show);
if (e == NULL)
return -ENOMEM;
return 0;
}
static void __net_exit psched_net_exit(struct net *net)
{
remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
return 0;
}
static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
static struct pernet_operations psched_net_ops = {
.init = psched_net_init,
.exit = psched_net_exit,
};
#if IS_ENABLED(CONFIG_RETPOLINE)
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif
static int __init pktsched_init(void)
{
int err;
err = register_pernet_subsys(&psched_net_ops);
if (err) {
pr_err("pktsched_init: "
"cannot initialize per netns operations\n");
return err;
}
register_qdisc(&pfifo_fast_ops);
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
register_qdisc(&mq_qdisc_ops);
register_qdisc(&noqueue_qdisc_ops);
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
0);
rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
0);
tc_wrapper_init();
return 0;
}
subsys_initcall(pktsched_init);
| linux-master | net/sched/sch_api.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/sch_gred.c Generic Random Early Detection queue.
*
* Authors: J Hadi Salim ([email protected]) 1998-2002
*
* 991129: - Bug fix with grio mode
* - a better single AvgQ mode with Grio (WRED)
* - A finer grained VQ dequeue based on suggestion
* from Ren Liu
* - More error checks
*
* For all the glorious comments look at include/net/red.h
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO (MAX_DPs / 2)
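/* MAX_DPs is a power of two, so the mask below cheaply extracts the
 * virtual queue index from skb->tc_index.
 */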
#define GRED_VQ_MASK (MAX_DPs - 1)
#define GRED_VQ_RED_FLAGS (TC_RED_ECN | TC_RED_HARDDROP)
struct gred_sched_data;
struct gred_sched;
struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop parameters */
u32 red_flags; /* virtualQ version of red_flags */
u64 bytesin; /* bytes seen on virtualQ so far*/
u32 packetsin; /* packets seen on virtualQ so far*/
u32 backlog; /* bytes on the virtualQ */
u8 prio; /* the prio of this vq */
struct red_parms parms;
struct red_vars vars;
struct red_stats stats;
};
enum {
GRED_WRED_MODE = 1,
GRED_RIO_MODE,
};
struct gred_sched {
struct gred_sched_data *tab[MAX_DPs];
unsigned long flags;
u32 red_flags;
u32 DPs;
u32 def;
struct red_vars wred_set;
struct tc_gred_qopt_offload *opt;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
return test_bit(GRED_WRED_MODE, &table->flags);
}
static inline void gred_enable_wred_mode(struct gred_sched *table)
{
__set_bit(GRED_WRED_MODE, &table->flags);
}
static inline void gred_disable_wred_mode(struct gred_sched *table)
{
__clear_bit(GRED_WRED_MODE, &table->flags);
}
static inline int gred_rio_mode(struct gred_sched *table)
{
return test_bit(GRED_RIO_MODE, &table->flags);
}
static inline void gred_enable_rio_mode(struct gred_sched *table)
{
__set_bit(GRED_RIO_MODE, &table->flags);
}
static inline void gred_disable_rio_mode(struct gred_sched *table)
{
__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
int i;
/* Really ugly O(n^2), but it shouldn't need to run too frequently. */
for (i = 0; i < table->DPs; i++) {
struct gred_sched_data *q = table->tab[i];
int n;
if (q == NULL)
continue;
for (n = i + 1; n < table->DPs; n++)
if (table->tab[n] && table->tab[n]->prio == q->prio)
return 1;
}
return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
struct gred_sched_data *q,
struct Qdisc *sch)
{
if (gred_wred_mode(table))
return sch->qstats.backlog;
else
return q->backlog;
}
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
return skb->tc_index & GRED_VQ_MASK;
}
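/* In WRED mode all virtual queues share a single set of RED averaging
 * state; these two helpers copy it in and out around each computation.
 */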
static inline void gred_load_wred_set(const struct gred_sched *table,
struct gred_sched_data *q)
{
q->vars.qavg = table->wred_set.qavg;
q->vars.qidlestart = table->wred_set.qidlestart;
}
static inline void gred_store_wred_set(struct gred_sched *table,
struct gred_sched_data *q)
{
table->wred_set.qavg = q->vars.qavg;
table->wred_set.qidlestart = q->vars.qidlestart;
}
static int gred_use_ecn(struct gred_sched_data *q)
{
return q->red_flags & TC_RED_ECN;
}
static int gred_use_harddrop(struct gred_sched_data *q)
{
return q->red_flags & TC_RED_HARDDROP;
}
static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
unsigned int i;
/* Local per-vq flags couldn't have been set unless the global ones are 0 */
if (table->red_flags)
return false;
for (i = 0; i < MAX_DPs; i++)
if (table->tab[i] && table->tab[i]->red_flags)
return true;
return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct gred_sched_data *q = NULL;
struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def;
q = t->tab[dp];
if (!q) {
/* Pass through packets not assigned to a DP
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
*/
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
sch->limit))
return qdisc_enqueue_tail(skb, sch);
else
goto drop;
}
/* Fix tc_index? Could be controversial, but it is needed
for requeueing. */
skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
}
/* sum up all the qavgs of prios < ours to get the new qavg */
if (!gred_wred_mode(t) && gred_rio_mode(t)) {
int i;
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->vars))
qavg += t->tab[i]->vars.qavg;
}
}
q->packetsin++;
q->bytesin += qdisc_pkt_len(skb);
if (gred_wred_mode(t))
gred_load_wred_set(t, q);
q->vars.qavg = red_calc_qavg(&q->parms,
&q->vars,
gred_backlog(t, q, sch));
if (red_is_idling(&q->vars))
red_end_of_idle_period(&q->vars);
if (gred_wred_mode(t))
gred_store_wred_set(t, q);
switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
case RED_DONT_MARK:
break;
case RED_PROB_MARK:
qdisc_qstats_overlimit(sch);
if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
}
q->stats.prob_mark++;
break;
case RED_HARD_MARK:
qdisc_qstats_overlimit(sch);
if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
goto congestion_drop;
}
q->stats.forced_mark++;
break;
}
if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
q->backlog += qdisc_pkt_len(skb);
return qdisc_enqueue_tail(skb, sch);
}
q->stats.pdrop++;
drop:
return qdisc_drop(skb, sch, to_free);
congestion_drop:
qdisc_drop(skb, sch, to_free);
return NET_XMIT_CN;
}
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
skb = qdisc_dequeue_head(sch);
if (skb) {
struct gred_sched_data *q;
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
if (gred_wred_mode(t)) {
if (!sch->qstats.backlog)
red_start_of_idle_period(&t->wred_set);
} else {
if (!q->backlog)
red_start_of_idle_period(&q->vars);
}
}
return skb;
}
return NULL;
}
static void gred_reset(struct Qdisc *sch)
{
int i;
struct gred_sched *t = qdisc_priv(sch);
qdisc_reset_queue(sch);
for (i = 0; i < t->DPs; i++) {
struct gred_sched_data *q = t->tab[i];
if (!q)
continue;
red_restart(&q->vars);
q->backlog = 0;
}
}
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
struct gred_sched *table = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_gred_qopt_offload *opt = table->opt;
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return;
memset(opt, 0, sizeof(*opt));
opt->command = command;
opt->handle = sch->handle;
opt->parent = sch->parent;
if (command == TC_GRED_REPLACE) {
unsigned int i;
opt->set.grio_on = gred_rio_mode(table);
opt->set.wred_on = gred_wred_mode(table);
opt->set.dp_cnt = table->DPs;
opt->set.dp_def = table->def;
for (i = 0; i < table->DPs; i++) {
struct gred_sched_data *q = table->tab[i];
if (!q)
continue;
opt->set.tab[i].present = true;
opt->set.tab[i].limit = q->limit;
opt->set.tab[i].prio = q->prio;
opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
opt->set.tab[i].is_ecn = gred_use_ecn(q);
opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
opt->set.tab[i].probability = q->parms.max_P;
opt->set.tab[i].backlog = &q->backlog;
}
opt->set.qstats = &sch->qstats;
}
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}
static int gred_offload_dump_stats(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt_offload *hw_stats;
u64 bytes = 0, packets = 0;
unsigned int i;
int ret;
hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
if (!hw_stats)
return -ENOMEM;
hw_stats->command = TC_GRED_STATS;
hw_stats->handle = sch->handle;
hw_stats->parent = sch->parent;
for (i = 0; i < MAX_DPs; i++) {
gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
if (table->tab[i])
hw_stats->stats.xstats[i] = &table->tab[i]->stats;
}
ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
/* Even if the driver returns a failure, adjust the stats - in case
* offload ended but the driver still wants to adjust the values.
*/
sch_tree_lock(sch);
for (i = 0; i < MAX_DPs; i++) {
if (!table->tab[i])
continue;
table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
}
_bstats_update(&sch->bstats, bytes, packets);
sch_tree_unlock(sch);
kfree(hw_stats);
return ret;
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
kfree(q);
}
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_sopt *sopt;
bool red_flags_changed;
int i;
if (!dps)
return -EINVAL;
sopt = nla_data(dps);
if (sopt->DPs > MAX_DPs) {
NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
return -EINVAL;
}
if (sopt->DPs == 0) {
NL_SET_ERR_MSG_MOD(extack,
"number of virtual queues can't be 0");
return -EINVAL;
}
if (sopt->def_DP >= sopt->DPs) {
NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
return -EINVAL;
}
if (sopt->flags && gred_per_vq_red_flags_used(table)) {
NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
return -EINVAL;
}
sch_tree_lock(sch);
table->DPs = sopt->DPs;
table->def = sopt->def_DP;
red_flags_changed = table->red_flags != sopt->flags;
table->red_flags = sopt->flags;
/*
* Every entry point to GRED is synchronized with the above code, and
* the DP is checked against DPs, i.e. shadowed VQs can no longer be
* found, so we can unlock right here.
*/
sch_tree_unlock(sch);
if (sopt->grio) {
gred_enable_rio_mode(table);
gred_disable_wred_mode(table);
if (gred_wred_mode_check(sch))
gred_enable_wred_mode(table);
} else {
gred_disable_rio_mode(table);
gred_disable_wred_mode(table);
}
if (red_flags_changed)
for (i = 0; i < table->DPs; i++)
if (table->tab[i])
table->tab[i]->red_flags =
table->red_flags & GRED_VQ_RED_FLAGS;
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
}
}
gred_offload(sch, TC_GRED_REPLACE);
return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct tc_gred_qopt *ctl, int prio,
u8 *stab, u32 max_P,
struct gred_sched_data **prealloc,
struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
return -EINVAL;
}
if (!q) {
table->tab[dp] = q = *prealloc;
*prealloc = NULL;
if (!q)
return -ENOMEM;
q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
}
q->DP = dp;
q->prio = prio;
if (ctl->limit > sch->limit)
q->limit = sch->limit;
else
q->limit = ctl->limit;
if (q->backlog == 0)
red_end_of_idle_period(&q->vars);
red_set_parms(&q->parms,
ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
ctl->Scell_log, stab, max_P);
red_set_vars(&q->vars);
return 0;
}
static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
[TCA_GRED_VQ_DP] = { .type = NLA_U32 },
[TCA_GRED_VQ_FLAGS] = { .type = NLA_U32 },
};
static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
[TCA_GRED_VQ_ENTRY] = { .type = NLA_NESTED },
};
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
[TCA_GRED_STAB] = { .len = 256 },
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 },
[TCA_GRED_LIMIT] = { .type = NLA_U32 },
[TCA_GRED_VQ_LIST] = { .type = NLA_NESTED },
};
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
u32 dp;
nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
gred_vq_policy, NULL);
dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
if (tb[TCA_GRED_VQ_FLAGS])
table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}
static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
const struct nlattr *attr;
int rem;
nla_for_each_nested(attr, vqs, rem) {
switch (nla_type(attr)) {
case TCA_GRED_VQ_ENTRY:
gred_vq_apply(table, attr);
break;
}
}
}
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
const struct nlattr *entry,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
int err;
u32 dp;
err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
gred_vq_policy, extack);
if (err < 0)
return err;
if (!tb[TCA_GRED_VQ_DP]) {
NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
return -EINVAL;
}
dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
if (dp >= table->DPs) {
NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
return -EINVAL;
}
if (dp != cdp && !table->tab[dp]) {
NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
return -EINVAL;
}
if (tb[TCA_GRED_VQ_FLAGS]) {
u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
if (table->red_flags && table->red_flags != red_flags) {
NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
return -EINVAL;
}
if (red_flags & ~GRED_VQ_RED_FLAGS) {
NL_SET_ERR_MSG_MOD(extack,
"invalid RED flags specified");
return -EINVAL;
}
}
return 0;
}
static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
struct nlattr *vqs, struct netlink_ext_ack *extack)
{
const struct nlattr *attr;
int rem, err;
err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
gred_vqe_policy, extack);
if (err < 0)
return err;
nla_for_each_nested(attr, vqs, rem) {
switch (nla_type(attr)) {
case TCA_GRED_VQ_ENTRY:
err = gred_vq_validate(table, cdp, attr, extack);
if (err)
return err;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
return -EINVAL;
}
}
if (rem > 0) {
NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
return -EINVAL;
}
return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt *ctl;
struct nlattr *tb[TCA_GRED_MAX + 1];
int err, prio = GRED_DEF_PRIO;
u8 *stab;
u32 max_P;
struct gred_sched_data *prealloc;
err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
extack);
if (err < 0)
return err;
if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
if (tb[TCA_GRED_LIMIT] != NULL)
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
if (tb[TCA_GRED_PARMS] == NULL ||
tb[TCA_GRED_STAB] == NULL ||
tb[TCA_GRED_LIMIT] != NULL) {
NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
return -EINVAL;
}
max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
if (ctl->DP >= table->DPs) {
NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
return -EINVAL;
}
if (tb[TCA_GRED_VQ_LIST]) {
err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
extack);
if (err)
return err;
}
if (gred_rio_mode(table)) {
if (ctl->prio == 0) {
int def_prio = GRED_DEF_PRIO;
if (table->tab[table->def])
def_prio = table->tab[table->def]->prio;
printk(KERN_DEBUG "GRED: DP %u does not have a prio "
"setting default to %d\n", ctl->DP, def_prio);
prio = def_prio;
} else
prio = ctl->prio;
}
prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
sch_tree_lock(sch);
err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
extack);
if (err < 0)
goto err_unlock_free;
if (tb[TCA_GRED_VQ_LIST])
gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
if (gred_rio_mode(table)) {
gred_disable_wred_mode(table);
if (gred_wred_mode_check(sch))
gred_enable_wred_mode(table);
}
sch_tree_unlock(sch);
kfree(prealloc);
gred_offload(sch, TC_GRED_REPLACE);
return 0;
err_unlock_free:
sch_tree_unlock(sch);
kfree(prealloc);
return err;
}
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct nlattr *tb[TCA_GRED_MAX + 1];
int err;
if (!opt)
return -EINVAL;
err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
extack);
if (err < 0)
return err;
if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
NL_SET_ERR_MSG_MOD(extack,
"virtual queue configuration can't be specified at initialization time");
return -EINVAL;
}
if (tb[TCA_GRED_LIMIT])
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
else
sch->limit = qdisc_dev(sch)->tx_queue_len
* psched_mtu(qdisc_dev(sch));
if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
if (!table->opt)
return -ENOMEM;
}
return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct gred_sched *table = qdisc_priv(sch);
struct nlattr *parms, *vqs, *opts = NULL;
int i;
u32 max_p[MAX_DPs];
struct tc_gred_sopt sopt = {
.DPs = table->DPs,
.def_DP = table->def,
.grio = gred_rio_mode(table),
.flags = table->red_flags,
};
if (gred_offload_dump_stats(sch))
goto nla_put_failure;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
goto nla_put_failure;
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
max_p[i] = q ? q->parms.max_P : 0;
}
if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
goto nla_put_failure;
/* Old style all-in-one dump of VQs */
parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
if (parms == NULL)
goto nla_put_failure;
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
struct tc_gred_qopt opt;
unsigned long qavg;
memset(&opt, 0, sizeof(opt));
if (!q) {
/* Hack -- fix at some point with a proper message.
This is how we indicate to tc that there is no VQ
at this DP. */
opt.DP = MAX_DPs + i;
goto append_opt;
}
opt.limit = q->limit;
opt.DP = q->DP;
opt.backlog = gred_backlog(table, q, sch);
opt.prio = q->prio;
opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
opt.Wlog = q->parms.Wlog;
opt.Plog = q->parms.Plog;
opt.Scell_log = q->parms.Scell_log;
opt.early = q->stats.prob_drop;
opt.forced = q->stats.forced_drop;
opt.pdrop = q->stats.pdrop;
opt.packets = q->packetsin;
opt.bytesin = q->bytesin;
if (gred_wred_mode(table))
gred_load_wred_set(table, q);
qavg = red_calc_qavg(&q->parms, &q->vars,
q->vars.qavg >> q->parms.Wlog);
opt.qave = qavg >> q->parms.Wlog;
append_opt:
if (nla_append(skb, sizeof(opt), &opt) < 0)
goto nla_put_failure;
}
nla_nest_end(skb, parms);
/* Dump the VQs again, in a more structured way */
vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
if (!vqs)
goto nla_put_failure;
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
struct nlattr *vq;
if (!q)
continue;
vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
if (!vq)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
goto nla_put_failure;
/* Stats */
if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
TCA_GRED_VQ_PAD))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
gred_backlog(table, q, sch)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
q->stats.prob_drop))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
q->stats.prob_mark))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
q->stats.forced_drop))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
q->stats.forced_mark))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
goto nla_put_failure;
nla_nest_end(skb, vq);
}
nla_nest_end(skb, vqs);
return nla_nest_end(skb, opts);
nla_put_failure:
nla_nest_cancel(skb, opts);
return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
int i;
for (i = 0; i < table->DPs; i++)
gred_destroy_vq(table->tab[i]);
gred_offload(sch, TC_GRED_DESTROY);
kfree(table->opt);
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.id = "gred",
.priv_size = sizeof(struct gred_sched),
.enqueue = gred_enqueue,
.dequeue = gred_dequeue,
.peek = qdisc_peek_head,
.init = gred_init,
.reset = gred_reset,
.destroy = gred_destroy,
.change = gred_change,
.dump = gred_dump,
.owner = THIS_MODULE,
};
static int __init gred_module_init(void)
{
return register_qdisc(&gred_qdisc_ops);
}
static void __exit gred_module_exit(void)
{
unregister_qdisc(&gred_qdisc_ops);
}
module_init(gred_module_init)
module_exit(gred_module_exit)
MODULE_LICENSE("GPL");
| linux-master | net/sched/sch_gred.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* net/sched/sch_choke.c CHOKE scheduler
*
* Copyright (c) 2011 Stephen Hemminger <[email protected]>
* Copyright (c) 2011 Eric Dumazet <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>
/*
CHOKe stateless AQM for fair bandwidth allocation
=================================================
CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
unresponsive flows) is a variant of RED that penalizes misbehaving flows but
maintains no flow state. The difference from RED is an additional step
during the enqueuing process. If average queue size is over the
low threshold (qmin), a packet is chosen at random from the queue.
If both the new and chosen packet are from the same flow, both
are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
needs to access packets in queue randomly. It has a minimal class
interface to allow overriding the builtin flow classifier with
filters.
Source:
R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
IEEE INFOCOM, 2000.
A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
Characteristics", IEEE/ACM Transactions on Networking, 2004
*/
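/* In short: on enqueue, once the average queue size exceeds qth_min, an
 * skb is drawn at random from the queue and, if it belongs to the same
 * flow as the arriving skb, both are dropped.
 */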
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE (128*1024 - 1)
struct choke_sched_data {
/* Parameters */
u32 limit;
unsigned char flags;
struct red_parms parms;
/* Variables */
struct red_vars vars;
struct {
u32 prob_drop; /* Early probability drops */
u32 prob_mark; /* Early probability marks */
u32 forced_drop; /* Forced drops, qavg > max_thresh */
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
u32 matched; /* Drops to flow match */
} stats;
unsigned int head;
unsigned int tail;
unsigned int tab_mask; /* size - 1 */
struct sk_buff **tab;
};
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
return (q->tail - q->head) & q->tab_mask;
}
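/* Worked example (illustrative): with tab_mask = 7 (an 8-slot table),
 * head = 6 and tail = 2, the occupied region wraps past the end of the
 * array and choke_len() returns (2 - 6) & 7 = 4 slots in use
 * (holes included).
 */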
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
return q->flags & TC_RED_ECN;
}
/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
return q->flags & TC_RED_HARDDROP;
}
/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
do {
q->head = (q->head + 1) & q->tab_mask;
if (q->head == q->tail)
break;
} while (q->tab[q->head] == NULL);
}
/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
do {
q->tail = (q->tail - 1) & q->tab_mask;
if (q->head == q->tail)
break;
} while (q->tab[q->tail] == NULL);
}
/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
struct sk_buff **to_free)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb = q->tab[idx];
q->tab[idx] = NULL;
if (idx == q->head)
choke_zap_head_holes(q);
if (idx == q->tail)
choke_zap_tail_holes(q);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch, to_free);
--sch->q.qlen;
}
struct choke_skb_cb {
u8 keys_valid;
struct flow_keys_digest keys;
};
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}
/*
 * Compare the flows of two packets.
 * Returns true only if the flow digests (source and destination
 * addresses and ports) match; returns false for special cases such
 * as differing protocols.
*/
static bool choke_match_flow(struct sk_buff *skb1,
struct sk_buff *skb2)
{
struct flow_keys temp;
if (skb1->protocol != skb2->protocol)
return false;
if (!choke_skb_cb(skb1)->keys_valid) {
choke_skb_cb(skb1)->keys_valid = 1;
skb_flow_dissect_flow_keys(skb1, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
}
if (!choke_skb_cb(skb2)->keys_valid) {
choke_skb_cb(skb2)->keys_valid = 1;
skb_flow_dissect_flow_keys(skb2, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
}
return !memcmp(&choke_skb_cb(skb1)->keys,
&choke_skb_cb(skb2)->keys,
sizeof(choke_skb_cb(skb1)->keys));
}
/*
 * Select a packet at random from the queue.
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb, then give up and return the head.
 * Returns NULL if the queue is empty (q->head == q->tail).
*/
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
unsigned int *pidx)
{
struct sk_buff *skb;
int retries = 3;
do {
*pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
} while (--retries > 0);
return q->tab[*pidx = q->head];
}
/*
 * Compare a new packet with a randomly chosen packet in the queue.
 * Returns true if the flows match and sets *pidx to the chosen index.
*/
static bool choke_match_random(const struct choke_sched_data *q,
struct sk_buff *nskb,
unsigned int *pidx)
{
struct sk_buff *oskb;
if (q->head == q->tail)
return false;
oskb = choke_peek_random(q, pidx);
return choke_match_flow(oskb, nskb);
}
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct choke_sched_data *q = qdisc_priv(sch);
const struct red_parms *p = &q->parms;
choke_skb_cb(skb)->keys_valid = 0;
/* Compute average queue usage (see RED) */
q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
if (red_is_idling(&q->vars))
red_end_of_idle_period(&q->vars);
/* Is queue small? */
if (q->vars.qavg <= p->qth_min)
q->vars.qcount = -1;
else {
unsigned int idx;
/* Draw a packet at random from queue and compare flow */
if (choke_match_random(q, skb, &idx)) {
q->stats.matched++;
choke_drop_by_idx(sch, idx, to_free);
goto congestion_drop;
}
/* Queue is large, always mark/drop */
if (q->vars.qavg > p->qth_max) {
q->vars.qcount = -1;
qdisc_qstats_overlimit(sch);
if (use_harddrop(q) || !use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
goto congestion_drop;
}
q->stats.forced_mark++;
} else if (++q->vars.qcount) {
if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
q->vars.qcount = 0;
q->vars.qR = red_random(p);
qdisc_qstats_overlimit(sch);
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
}
q->stats.prob_mark++;
}
} else
q->vars.qR = red_random(p);
}
/* Admit new packet */
if (sch->q.qlen < q->limit) {
q->tab[q->tail] = skb;
q->tail = (q->tail + 1) & q->tab_mask;
++sch->q.qlen;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
q->stats.pdrop++;
return qdisc_drop(skb, sch, to_free);
congestion_drop:
qdisc_drop(skb, sch, to_free);
return NET_XMIT_CN;
}
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
if (q->head == q->tail) {
if (!red_is_idling(&q->vars))
red_start_of_idle_period(&q->vars);
return NULL;
}
skb = q->tab[q->head];
q->tab[q->head] = NULL;
choke_zap_head_holes(q);
--sch->q.qlen;
qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
}
static void choke_reset(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
q->head = (q->head + 1) & q->tab_mask;
if (!skb)
continue;
rtnl_qdisc_drop(skb, sch);
}
if (q->tab)
memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
q->head = q->tail = 0;
red_restart(&q->vars);
}
static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
[TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) },
[TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE },
[TCA_CHOKE_MAX_P] = { .type = NLA_U32 },
};
static void choke_free(void *addr)
{
kvfree(addr);
}
static int choke_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CHOKE_MAX + 1];
const struct tc_red_qopt *ctl;
int err;
struct sk_buff **old = NULL;
unsigned int mask;
u32 max_P;
u8 *stab;
if (opt == NULL)
return -EINVAL;
err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
choke_policy, NULL);
if (err < 0)
return err;
if (tb[TCA_CHOKE_PARMS] == NULL ||
tb[TCA_CHOKE_STAB] == NULL)
return -EINVAL;
max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
stab = nla_data(tb[TCA_CHOKE_STAB]);
if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
return -EINVAL;
if (ctl->limit > CHOKE_MAX_QUEUE)
return -EINVAL;
mask = roundup_pow_of_two(ctl->limit + 1) - 1;
if (mask != q->tab_mask) {
struct sk_buff **ntab;
ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
if (!ntab)
return -ENOMEM;
sch_tree_lock(sch);
old = q->tab;
if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0;
unsigned int dropped = 0;
while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
q->head = (q->head + 1) & q->tab_mask;
if (!skb)
continue;
if (tail < mask) {
ntab[tail++] = skb;
continue;
}
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
rtnl_qdisc_drop(skb, sch);
}
qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q->head = 0;
q->tail = tail;
}
q->tab_mask = mask;
q->tab = ntab;
} else
sch_tree_lock(sch);
q->flags = ctl->flags;
q->limit = ctl->limit;
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
stab,
max_P);
red_set_vars(&q->vars);
if (q->head == q->tail)
red_end_of_idle_period(&q->vars);
sch_tree_unlock(sch);
choke_free(old);
return 0;
}
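/* Sizing example (illustrative): a configured limit of 1000 packets
 * yields mask = roundup_pow_of_two(1001) - 1 = 1023, i.e. a 1024-slot
 * table, so the ring always has at least one free slot and head == tail
 * can unambiguously mean "empty".
 */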
static int choke_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
return choke_change(sch, opt, extack);
}
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct nlattr *opts = NULL;
struct tc_red_qopt opt = {
.limit = q->limit,
.flags = q->flags,
.qth_min = q->parms.qth_min >> q->parms.Wlog,
.qth_max = q->parms.qth_max >> q->parms.Wlog,
.Wlog = q->parms.Wlog,
.Plog = q->parms.Plog,
.Scell_log = q->parms.Scell_log,
};
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
nla_nest_cancel(skb, opts);
return -EMSGSIZE;
}
static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct tc_choke_xstats st = {
.early = q->stats.prob_drop + q->stats.forced_drop,
.marked = q->stats.prob_mark + q->stats.forced_mark,
.pdrop = q->stats.pdrop,
.matched = q->stats.matched,
};
return gnet_stats_copy_app(d, &st, sizeof(st));
}
static void choke_destroy(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
choke_free(q->tab);
}
static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
return (q->head != q->tail) ? q->tab[q->head] : NULL;
}
static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.id = "choke",
.priv_size = sizeof(struct choke_sched_data),
.enqueue = choke_enqueue,
.dequeue = choke_dequeue,
.peek = choke_peek_head,
.init = choke_init,
.destroy = choke_destroy,
.reset = choke_reset,
.change = choke_change,
.dump = choke_dump,
.dump_stats = choke_dump_stats,
.owner = THIS_MODULE,
};
static int __init choke_module_init(void)
{
return register_qdisc(&choke_qdisc_ops);
}
static void __exit choke_module_exit(void)
{
unregister_qdisc(&choke_qdisc_ops);
}
module_init(choke_module_init)
module_exit(choke_module_exit)
MODULE_LICENSE("GPL");
| linux-master | net/sched/sch_choke.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/sch_generic.c Generic packet scheduler routines.
*
* Authors: Alexey Kuznetsov, <[email protected]>
* Jamal Hadi Salim, <[email protected]> 990601
* - Ingress support
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
static void qdisc_maybe_clear_missed(struct Qdisc *q,
const struct netdev_queue *txq)
{
clear_bit(__QDISC_STATE_MISSED, &q->state);
/* Make sure the below netif_xmit_frozen_or_stopped()
* checking happens after clearing STATE_MISSED.
*/
smp_mb__after_atomic();
/* Check netif_xmit_frozen_or_stopped() again: if the queue is no
 * longer frozen or stopped, re-set STATE_MISSED in case the bit set
 * by netif_tx_wake_queue()'s rescheduling of net_tx_action() was
 * cleared by the clear_bit() above.
*/
if (!netif_xmit_frozen_or_stopped(txq))
set_bit(__QDISC_STATE_MISSED, &q->state);
else
set_bit(__QDISC_STATE_DRAINING, &q->state);
}
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
* qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
* - ingress filtering is also serialized via qdisc root lock
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
const struct netdev_queue *txq = q->dev_queue;
spinlock_t *lock = NULL;
struct sk_buff *skb;
if (q->flags & TCQ_F_NOLOCK) {
lock = qdisc_lock(q);
spin_lock(lock);
}
skb = skb_peek(&q->skb_bad_txq);
if (skb) {
/* check the reason of requeuing without tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
skb = __skb_dequeue(&q->skb_bad_txq);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
qdisc_qstats_cpu_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
}
} else {
skb = SKB_XOFF_MAGIC;
qdisc_maybe_clear_missed(q, txq);
}
}
if (lock)
spin_unlock(lock);
return skb;
}
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
if (unlikely(skb))
skb = __skb_dequeue_bad_txq(q);
return skb;
}
static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
struct sk_buff *skb)
{
spinlock_t *lock = NULL;
if (q->flags & TCQ_F_NOLOCK) {
lock = qdisc_lock(q);
spin_lock(lock);
}
__skb_queue_tail(&q->skb_bad_txq, skb);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_inc(q, skb);
qdisc_qstats_cpu_qlen_inc(q);
} else {
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++;
}
if (lock)
spin_unlock(lock);
}
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
spinlock_t *lock = NULL;
if (q->flags & TCQ_F_NOLOCK) {
lock = qdisc_lock(q);
spin_lock(lock);
}
while (skb) {
struct sk_buff *next = skb->next;
__skb_queue_tail(&q->gso_skb, skb);
/* it's still part of the queue */
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_requeues_inc(q);
qdisc_qstats_cpu_backlog_inc(q, skb);
qdisc_qstats_cpu_qlen_inc(q);
} else {
q->qstats.requeues++;
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++;
}
skb = next;
}
if (lock) {
spin_unlock(lock);
set_bit(__QDISC_STATE_MISSED, &q->state);
} else {
__netif_schedule(q);
}
}
static void try_bulk_dequeue_skb(struct Qdisc *q,
struct sk_buff *skb,
const struct netdev_queue *txq,
int *packets)
{
int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
while (bytelimit > 0) {
struct sk_buff *nskb = q->dequeue(q);
if (!nskb)
break;
bytelimit -= nskb->len; /* covers GSO len */
skb->next = nskb;
skb = nskb;
(*packets)++; /* GSO counts as one pkt */
}
skb_mark_not_on_list(skb);
}
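/* Budget walk (illustrative, assumed BQL numbers): with 10000 bytes
 * available and 1500-byte packets, the first skb leaves bytelimit = 8500;
 * six more packets are chained (bytelimit falls to 7000, 5500, 4000,
 * 2500, 1000, then -500) before the loop stops, so up to seven packets
 * leave in one burst.
 */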
/* This variant of try_bulk_dequeue_skb() makes sure
* all skbs in the chain are for the same txq
*/
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
struct sk_buff *skb,
int *packets)
{
int mapping = skb_get_queue_mapping(skb);
struct sk_buff *nskb;
int cnt = 0;
do {
nskb = q->dequeue(q);
if (!nskb)
break;
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
qdisc_enqueue_skb_bad_txq(q, nskb);
break;
}
skb->next = nskb;
skb = nskb;
} while (++cnt < 8);
(*packets) += cnt;
skb_mark_not_on_list(skb);
}
/* Note that dequeue_skb() can possibly return an skb list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be an skb list.
*/
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
int *packets)
{
const struct netdev_queue *txq = q->dev_queue;
struct sk_buff *skb = NULL;
*packets = 1;
if (unlikely(!skb_queue_empty(&q->gso_skb))) {
spinlock_t *lock = NULL;
if (q->flags & TCQ_F_NOLOCK) {
lock = qdisc_lock(q);
spin_lock(lock);
}
skb = skb_peek(&q->gso_skb);
/* skb may be null if another cpu pulls gso_skb off in between
* empty check and lock.
*/
if (!skb) {
if (lock)
spin_unlock(lock);
goto validate;
}
/* skbs in gso_skb were already validated */
*validate = false;
if (xfrm_offload(skb))
*validate = true;
/* check the reason of requeuing without tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
skb = __skb_dequeue(&q->gso_skb);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
qdisc_qstats_cpu_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
}
} else {
skb = NULL;
qdisc_maybe_clear_missed(q, txq);
}
if (lock)
spin_unlock(lock);
goto trace;
}
validate:
*validate = true;
if ((q->flags & TCQ_F_ONETXQUEUE) &&
netif_xmit_frozen_or_stopped(txq)) {
qdisc_maybe_clear_missed(q, txq);
return skb;
}
skb = qdisc_dequeue_skb_bad_txq(q);
if (unlikely(skb)) {
if (skb == SKB_XOFF_MAGIC)
return NULL;
goto bulk;
}
skb = q->dequeue(q);
if (skb) {
bulk:
if (qdisc_may_bulk(q))
try_bulk_dequeue_skb(q, skb, txq, packets);
else
try_bulk_dequeue_skb_slow(q, skb, packets);
}
trace:
trace_qdisc_dequeue(q, txq, *packets, skb);
return skb;
}
/*
* Transmit possibly several skbs, and handle the return status as
* required. Owning qdisc running bit guarantees that only one CPU
* can execute this function.
*
* Returns to the caller:
 * false - hardware queue frozen, back off
 * true - feel free to send more packets
*/
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
bool again = false;
/* And release qdisc */
if (root_lock)
spin_unlock(root_lock);
/* Note that we validate skb (GSO, checksum, ...) outside of locks */
if (validate)
skb = validate_xmit_skb_list(skb, dev, &again);
#ifdef CONFIG_XFRM_OFFLOAD
if (unlikely(again)) {
if (root_lock)
spin_lock(root_lock);
dev_requeue_skb(skb, q);
return false;
}
#endif
if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
else
qdisc_maybe_clear_missed(q, txq);
HARD_TX_UNLOCK(dev, txq);
} else {
if (root_lock)
spin_lock(root_lock);
return true;
}
if (root_lock)
spin_lock(root_lock);
if (!dev_xmit_complete(ret)) {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely(ret != NETDEV_TX_BUSY))
net_warn_ratelimited("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
dev_requeue_skb(skb, q);
return false;
}
return true;
}
/*
* NOTE: Called under qdisc_lock(q) with locally disabled BH.
*
* running seqcount guarantees only one CPU can process
* this qdisc at a time. qdisc_lock(q) serializes queue accesses for
* this queue.
*
* netif_tx_lock serializes accesses to device driver.
*
* qdisc_lock(q) and netif_tx_lock are mutually exclusive,
* if one is grabbed, another must be free.
*
 * Note that this procedure can be called by a watchdog timer.
*
* Returns to the caller:
 * false - queue is empty or throttled.
 * true - queue is not empty.
*
*/
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
spinlock_t *root_lock = NULL;
struct netdev_queue *txq;
struct net_device *dev;
struct sk_buff *skb;
bool validate;
/* Dequeue packet */
skb = dequeue_skb(q, &validate, packets);
if (unlikely(!skb))
return false;
if (!(q->flags & TCQ_F_NOLOCK))
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = skb_get_tx_queue(dev, skb);
return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
void __qdisc_run(struct Qdisc *q)
{
int quota = READ_ONCE(dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
quota -= packets;
if (quota <= 0) {
if (q->flags & TCQ_F_NOLOCK)
set_bit(__QDISC_STATE_MISSED, &q->state);
else
__netif_schedule(q);
break;
}
}
}
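/* Quota example (illustrative): with the default dev_tx_weight of 64 and
 * qdisc_restart() reporting 7 bulked packets per call, ten calls run
 * before quota goes non-positive and the remaining work is deferred via
 * STATE_MISSED or __netif_schedule().
 */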
unsigned long dev_trans_start(struct net_device *dev)
{
unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
unsigned long val;
unsigned int i;
for (i = 1; i < dev->num_tx_queues; i++) {
val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
if (val && time_after(val, res))
res = val;
}
return res;
}
EXPORT_SYMBOL(dev_trans_start);
static void netif_freeze_queues(struct net_device *dev)
{
unsigned int i;
int cpu;
cpu = smp_processor_id();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
/* We are the only thread of execution doing a
* freeze, but we have to grab the _xmit_lock in
* order to synchronize with threads which are in
* the ->hard_start_xmit() handler and already
* checked the frozen bit.
*/
__netif_tx_lock(txq, cpu);
set_bit(__QUEUE_STATE_FROZEN, &txq->state);
__netif_tx_unlock(txq);
}
}
void netif_tx_lock(struct net_device *dev)
{
spin_lock(&dev->tx_global_lock);
netif_freeze_queues(dev);
}
EXPORT_SYMBOL(netif_tx_lock);
static void netif_unfreeze_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
/* No need to grab the _xmit_lock here. If the
* queue is not stopped for another reason, we
* force a schedule.
*/
clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
netif_schedule_queue(txq);
}
}
void netif_tx_unlock(struct net_device *dev)
{
netif_unfreeze_queues(dev);
spin_unlock(&dev->tx_global_lock);
}
EXPORT_SYMBOL(netif_tx_unlock);
static void dev_watchdog(struct timer_list *t)
{
struct net_device *dev = from_timer(dev, t, watchdog_timer);
bool release = true;
spin_lock(&dev->tx_global_lock);
if (!qdisc_tx_is_noop(dev)) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
unsigned int timedout_ms = 0;
unsigned int i;
unsigned long trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
trans_start = READ_ONCE(txq->trans_start);
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
timedout_ms = jiffies_to_msecs(jiffies - trans_start);
atomic_long_inc(&txq->trans_timeout);
break;
}
}
if (unlikely(timedout_ms)) {
trace_net_dev_xmit_timeout(dev, i);
WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n",
dev->name, netdev_drivername(dev), i, timedout_ms);
netif_freeze_queues(dev);
dev->netdev_ops->ndo_tx_timeout(dev, i);
netif_unfreeze_queues(dev);
}
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies +
dev->watchdog_timeo)))
release = false;
}
}
spin_unlock(&dev->tx_global_lock);
if (release)
netdev_put(dev, &dev->watchdog_dev_tracker);
}
void __netdev_watchdog_up(struct net_device *dev)
{
if (dev->netdev_ops->ndo_tx_timeout) {
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies + dev->watchdog_timeo)))
netdev_hold(dev, &dev->watchdog_dev_tracker,
GFP_ATOMIC);
}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
static void dev_watchdog_up(struct net_device *dev)
{
__netdev_watchdog_up(dev);
}
static void dev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
netdev_put(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
/**
* netif_carrier_on - set carrier
* @dev: network device
*
* Device has detected acquisition of carrier.
*/
void netif_carrier_on(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
atomic_inc(&dev->carrier_up_count);
linkwatch_fire_event(dev);
if (netif_running(dev))
__netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_carrier_on);
/**
* netif_carrier_off - clear carrier
* @dev: network device
*
* Device has detected loss of carrier.
*/
void netif_carrier_off(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
atomic_inc(&dev->carrier_down_count);
linkwatch_fire_event(dev);
}
}
EXPORT_SYMBOL(netif_carrier_off);
/**
* netif_carrier_event - report carrier state event
* @dev: network device
*
* Device has detected a carrier event but the carrier state wasn't changed.
* Use in drivers when querying carrier state asynchronously, to avoid missing
* events (link flaps) if link recovers before it's queried.
*/
void netif_carrier_event(struct net_device *dev)
{
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
atomic_inc(&dev->carrier_up_count);
atomic_inc(&dev->carrier_down_count);
linkwatch_fire_event(dev);
}
EXPORT_SYMBOL_GPL(netif_carrier_event);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
*/
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
__qdisc_drop(skb, to_free);
return NET_XMIT_CN;
}
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
return NULL;
}
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
.id = "noop",
.priv_size = 0,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.peek = noop_dequeue,
.owner = THIS_MODULE,
};
static struct netdev_queue noop_netdev_queue = {
RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
};
struct Qdisc noop_qdisc = {
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
.gso_skb = {
.next = (struct sk_buff *)&noop_qdisc.gso_skb,
.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
},
.skb_bad_txq = {
.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
},
};
EXPORT_SYMBOL(noop_qdisc);
static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
/* register_qdisc() assigns a default of noop_enqueue if unset,
* but __dev_queue_xmit() treats noqueue only as such
* if this is NULL - so clear it here. */
qdisc->enqueue = NULL;
return 0;
}
struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
.id = "noqueue",
.priv_size = 0,
.init = noqueue_init,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.peek = noop_dequeue,
.owner = THIS_MODULE,
};
static const u8 prio2band[TC_PRIO_MAX + 1] = {
1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
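/* Mapping example: the table is indexed by skb->priority & TC_PRIO_MAX,
 * so TC_PRIO_BESTEFFORT (0) lands in band 1, TC_PRIO_BULK (2) in the
 * lowest band 2, and TC_PRIO_INTERACTIVE (6) / TC_PRIO_CONTROL (7) in
 * the highest band 0.
 */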
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
*/
#define PFIFO_FAST_BANDS 3
/*
* Private data for a pfifo_fast scheduler containing:
* - rings for priority bands
*/
struct pfifo_fast_priv {
struct skb_array q[PFIFO_FAST_BANDS];
};
static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
int band)
{
return &priv->q[band];
}
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
int band = prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct skb_array *q = band2list(priv, band);
unsigned int pkt_len = qdisc_pkt_len(skb);
int err;
err = skb_array_produce(q, skb);
if (unlikely(err)) {
if (qdisc_is_percpu_stats(qdisc))
return qdisc_drop_cpu(skb, qdisc, to_free);
else
return qdisc_drop(skb, qdisc, to_free);
}
qdisc_update_stats_at_enqueue(qdisc, pkt_len);
return NET_XMIT_SUCCESS;
}
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct sk_buff *skb = NULL;
bool need_retry = true;
int band;
retry:
for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
struct skb_array *q = band2list(priv, band);
if (__skb_array_empty(q))
continue;
skb = __skb_array_consume(q);
}
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
} else if (need_retry &&
READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
/* Delay clearing the STATE_MISSED here to reduce
* the overhead of the second spin_trylock() in
* qdisc_run_begin() and __netif_schedule() calling
* in qdisc_run_end().
*/
clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
/* Make sure dequeuing happens after clearing
* STATE_MISSED.
*/
smp_mb__after_atomic();
need_retry = false;
goto retry;
}
return skb;
}
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct sk_buff *skb = NULL;
int band;
for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
struct skb_array *q = band2list(priv, band);
skb = __skb_array_peek(q);
}
return skb;
}
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int i, band;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (band = 0; band < PFIFO_FAST_BANDS; band++) {
struct skb_array *q = band2list(priv, band);
struct sk_buff *skb;
/* The ring can be NULL if the destroy path runs because a
 * skb_array_init() call failed in pfifo_fast_init().
 */
if (!q->ring.queue)
continue;
while ((skb = __skb_array_consume(q)) != NULL)
kfree_skb(skb);
}
if (qdisc_is_percpu_stats(qdisc)) {
for_each_possible_cpu(i) {
struct gnet_stats_queue *q;
q = per_cpu_ptr(qdisc->cpu_qstats, i);
q->backlog = 0;
q->qlen = 0;
}
}
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
return -1;
}
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int prio;
/* guard against zero length rings */
if (!qlen)
return -EINVAL;
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
struct skb_array *q = band2list(priv, prio);
int err;
err = skb_array_init(q, qlen, GFP_KERNEL);
if (err)
return -ENOMEM;
}
/* Can bypass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
static void pfifo_fast_destroy(struct Qdisc *sch)
{
struct pfifo_fast_priv *priv = qdisc_priv(sch);
int prio;
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
struct skb_array *q = band2list(priv, prio);
/* The ring can be NULL if the destroy path runs because a
 * skb_array_init() call failed in pfifo_fast_init().
 */
if (!q->ring.queue)
continue;
/* Destroy ring but no need to kfree_skb because a call to
* pfifo_fast_reset() has already done that work.
*/
ptr_ring_cleanup(&q->ring, NULL);
}
}
static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
unsigned int new_len)
{
struct pfifo_fast_priv *priv = qdisc_priv(sch);
struct skb_array *bands[PFIFO_FAST_BANDS];
int prio;
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
struct skb_array *q = band2list(priv, prio);
bands[prio] = q;
}
return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.id = "pfifo_fast",
.priv_size = sizeof(struct pfifo_fast_priv),
.enqueue = pfifo_fast_enqueue,
.dequeue = pfifo_fast_dequeue,
.peek = pfifo_fast_peek,
.init = pfifo_fast_init,
.destroy = pfifo_fast_destroy,
.reset = pfifo_fast_reset,
.dump = pfifo_fast_dump,
.change_tx_queue_len = pfifo_fast_change_tx_queue_len,
.owner = THIS_MODULE,
.static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack)
{
struct Qdisc *sch;
unsigned int size = sizeof(*sch) + ops->priv_size;
int err = -ENOBUFS;
struct net_device *dev;
if (!dev_queue) {
NL_SET_ERR_MSG(extack, "No device queue given");
err = -EINVAL;
goto errout;
}
dev = dev_queue->dev;
sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
if (!sch)
goto errout;
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
gnet_stats_basic_sync_init(&sch->bstats);
spin_lock_init(&sch->q.lock);
if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats =
netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!sch->cpu_bstats)
goto errout1;
sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!sch->cpu_qstats) {
free_percpu(sch->cpu_bstats);
goto errout1;
}
}
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
/* seqlock has the same scope of busylock, for NOLOCK qdisc */
spin_lock_init(&sch->seqlock);
lockdep_set_class(&sch->seqlock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
sch->ops = ops;
sch->flags = ops->static_flags;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
errout1:
kfree(sch);
errout:
return ERR_PTR(err);
}
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
unsigned int parentid,
struct netlink_ext_ack *extack)
{
struct Qdisc *sch;
if (!try_module_get(ops->owner)) {
NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
return NULL;
}
sch = qdisc_alloc(dev_queue, ops, extack);
if (IS_ERR(sch)) {
module_put(ops->owner);
return NULL;
}
sch->parent = parentid;
if (!ops->init || ops->init(sch, NULL, extack) == 0) {
trace_qdisc_create(ops, dev_queue->dev, parentid);
return sch;
}
qdisc_put(sch);
return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);
/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
trace_qdisc_reset(qdisc);
if (ops->reset)
ops->reset(qdisc);
__skb_queue_purge(&qdisc->gso_skb);
__skb_queue_purge(&qdisc->skb_bad_txq);
qdisc->q.qlen = 0;
qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);
void qdisc_free(struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc)) {
free_percpu(qdisc->cpu_bstats);
free_percpu(qdisc->cpu_qstats);
}
kfree(qdisc);
}
static void qdisc_free_cb(struct rcu_head *head)
{
struct Qdisc *q = container_of(head, struct Qdisc, rcu);
qdisc_free(q);
}
static void __qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
#ifdef CONFIG_NET_SCHED
qdisc_hash_del(qdisc);
qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
gen_kill_estimator(&qdisc->rate_est);
qdisc_reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
module_put(ops->owner);
netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
void qdisc_destroy(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN)
return;
__qdisc_destroy(qdisc);
}
void qdisc_put(struct Qdisc *qdisc)
{
if (!qdisc)
return;
if (qdisc->flags & TCQ_F_BUILTIN ||
!refcount_dec_and_test(&qdisc->refcnt))
return;
__qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);
/* Version of qdisc_put() that is called with rtnl mutex unlocked.
* Intended to be used as optimization, this function only takes rtnl lock if
* qdisc reference counter reached zero.
*/
void qdisc_put_unlocked(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN ||
!refcount_dec_and_rtnl_lock(&qdisc->refcnt))
return;
__qdisc_destroy(qdisc);
rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc)
{
struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
spinlock_t *root_lock;
root_lock = qdisc_lock(oqdisc);
spin_lock_bh(root_lock);
/* ... and graft new one */
if (qdisc == NULL)
qdisc = &noop_qdisc;
rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
spin_unlock_bh(root_lock);
return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);
static void shutdown_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
qdisc_put(qdisc);
}
}
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
struct Qdisc *qdisc;
const struct Qdisc_ops *ops = default_qdisc_ops;
if (dev->priv_flags & IFF_NO_QUEUE)
ops = &noqueue_qdisc_ops;
else if (dev->type == ARPHRD_CAN)
ops = &pfifo_fast_ops;
qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
if (!qdisc)
return;
if (!netif_is_multiqueue(dev))
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}
static void attach_default_qdiscs(struct net_device *dev)
{
struct netdev_queue *txq;
struct Qdisc *qdisc;
txq = netdev_get_tx_queue(dev, 0);
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
qdisc = rtnl_dereference(txq->qdisc_sleeping);
rcu_assign_pointer(dev->qdisc, qdisc);
qdisc_refcount_inc(qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
rcu_assign_pointer(dev->qdisc, qdisc);
qdisc->ops->attach(qdisc);
}
}
qdisc = rtnl_dereference(dev->qdisc);
/* Detect default qdisc setup/init failed and fallback to "noqueue" */
if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) failed, falling back to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
qdisc = rtnl_dereference(txq->qdisc_sleeping);
rcu_assign_pointer(dev->qdisc, qdisc);
qdisc_refcount_inc(qdisc);
dev->priv_flags ^= IFF_NO_QUEUE;
}
#ifdef CONFIG_NET_SCHED
if (qdisc != &noop_qdisc)
qdisc_hash_add(qdisc, false);
#endif
}
static void transition_one_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_need_watchdog)
{
struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
int *need_watchdog_p = _need_watchdog;
if (!(new_qdisc->flags & TCQ_F_BUILTIN))
clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p) {
WRITE_ONCE(dev_queue->trans_start, 0);
*need_watchdog_p = 1;
}
}
void dev_activate(struct net_device *dev)
{
int need_watchdog;
/* No queueing discipline is attached to device;
 * create a default one for devices which need queueing,
 * and noqueue_qdisc for virtual interfaces.
*/
if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
return;
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
if (dev_ingress_queue(dev))
transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
if (need_watchdog) {
netif_trans_update(dev);
dev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(dev_activate);
static void qdisc_deactivate(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN)
return;
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
}
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
struct Qdisc *qdisc_default = _qdisc_default;
struct Qdisc *qdisc;
qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
qdisc_deactivate(qdisc);
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
}
}
static void dev_reset_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
struct Qdisc *qdisc;
bool nolock;
qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc)
return;
nolock = qdisc->flags & TCQ_F_NOLOCK;
if (nolock)
spin_lock_bh(&qdisc->seqlock);
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
if (nolock) {
clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
spin_unlock_bh(&qdisc->seqlock);
}
}
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
spinlock_t *root_lock;
struct Qdisc *q;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
q = rtnl_dereference(dev_queue->qdisc_sleeping);
root_lock = qdisc_lock(q);
spin_lock_bh(root_lock);
val = (qdisc_is_running(q) ||
test_bit(__QDISC_STATE_SCHED, &q->state));
spin_unlock_bh(root_lock);
if (val)
return true;
}
return false;
}
/**
* dev_deactivate_many - deactivate transmissions on several devices
* @head: list of devices to deactivate
*
* This function returns only when all outstanding transmissions
* have completed, unless all devices are in dismantle phase.
*/
void dev_deactivate_many(struct list_head *head)
{
struct net_device *dev;
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
&noop_qdisc);
if (dev_ingress_queue(dev))
dev_deactivate_queue(dev, dev_ingress_queue(dev),
&noop_qdisc);
dev_watchdog_down(dev);
}
/* Wait for outstanding qdisc-less dev_queue_xmit calls or
* outstanding qdisc enqueuing calls.
 * This is avoided if all devices are in dismantle phase:
* Caller will call synchronize_net() for us
*/
synchronize_net();
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
if (dev_ingress_queue(dev))
dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
}
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev)) {
/* wait_event() would avoid this sleep-loop but would
* require expensive checks in the fast paths of packet
* processing which isn't worth it.
*/
schedule_timeout_uninterruptible(1);
}
}
}
void dev_deactivate(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->close_list, &single);
dev_deactivate_many(&single);
list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);
static int qdisc_change_tx_queue_len(struct net_device *dev,
struct netdev_queue *dev_queue)
{
struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
const struct Qdisc_ops *ops = qdisc->ops;
if (ops->change_tx_queue_len)
return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
return 0;
}
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx)
{
struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
if (qdisc->ops->change_real_num_tx)
qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
}
void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *qdisc;
unsigned int i;
for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
/* Only update the default qdiscs we created,
* qdiscs with handles are always hashed.
*/
if (qdisc != &noop_qdisc && !qdisc->handle)
qdisc_hash_del(qdisc);
}
for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
if (qdisc != &noop_qdisc && !qdisc->handle)
qdisc_hash_add(qdisc, false);
}
#endif
}
EXPORT_SYMBOL(mq_change_real_num_tx);
int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
bool up = dev->flags & IFF_UP;
unsigned int i;
int ret = 0;
if (up)
dev_deactivate(dev);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
/* TODO: revert changes on a partial failure */
if (ret)
break;
}
if (up)
dev_activate(dev);
return ret;
}
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc)
{
struct Qdisc *qdisc = _qdisc;
rcu_assign_pointer(dev_queue->qdisc, qdisc);
rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}
void dev_init_scheduler(struct net_device *dev)
{
rcu_assign_pointer(dev->qdisc, &noop_qdisc);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
qdisc_put(rtnl_dereference(dev->qdisc));
rcu_assign_pointer(dev->qdisc, &noop_qdisc);
WARN_ON(timer_pending(&dev->watchdog_timer));
}
/**
* psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
* @rate: Rate to compute reciprocal division values of
* @mult: Multiplier for reciprocal division
* @shift: Shift for reciprocal division
*
* The multiplier and shift for reciprocal division by rate are stored
* in mult and shift.
*
 * The deal here is to replace a divide with a reciprocal one
 * in the fast path (a reciprocal divide is a multiply and a shift).
 *
 * The normal formula would be:
 * time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 *
 * We compute mult/shift to use instead:
 * time_in_ns = (len * mult) >> shift;
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here because it doesn't handle
 * 64-bit values.
*/
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{
u64 factor = NSEC_PER_SEC;
*mult = 1;
*shift = 0;
if (rate <= 0)
return;
for (;;) {
*mult = div64_u64(factor, rate);
if (*mult & (1U << 31) || factor & (1ULL << 63))
break;
factor <<= 1;
(*shift)++;
}
}
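/* Worked example (illustrative): for rate = 125000000 bytes/s (1 Gbit/s)
 * the loop settles on mult = 1U << 31 and shift = 28, so
 * time_in_ns = (len * mult) >> shift = len * 8, i.e. 8 ns per byte,
 * matching NSEC_PER_SEC / rate exactly.
 */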
void psched_ratecfg_precompute(struct psched_ratecfg *r,
const struct tc_ratespec *conf,
u64 rate64)
{
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->mpu = conf->mpu;
r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{
r->rate_pkts_ps = pktrate64;
psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ppscfg_precompute);
void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head)
{
/* Protected with chain0->filter_chain_lock.
* Can't access chain directly because tp_head can be NULL.
*/
struct mini_Qdisc *miniq_old =
rcu_dereference_protected(*miniqp->p_miniq, 1);
struct mini_Qdisc *miniq;
if (!tp_head) {
RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
} else {
miniq = miniq_old != &miniqp->miniq1 ?
&miniqp->miniq1 : &miniqp->miniq2;
/* We need to make sure that readers won't see the miniq
* we are about to modify. So ensure that at least one RCU
* grace period has elapsed since the miniq was made
* inactive.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT))
cond_synchronize_rcu(miniq->rcu_state);
else if (!poll_state_synchronize_rcu(miniq->rcu_state))
synchronize_rcu_expedited();
miniq->filter_list = tp_head;
rcu_assign_pointer(*miniqp->p_miniq, miniq);
}
if (miniq_old)
/* This is the counterpart of the RCU sync above. We need to
 * block a potential new user of miniq_old until all readers
 * have stopped seeing it.
*/
miniq_old->rcu_state = start_poll_synchronize_rcu();
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
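/* Usage sketch (illustrative): miniq1/miniq2 act as a double buffer.
 * Each swap writes tp_head into whichever miniq is currently inactive,
 * waits out a grace period so no reader can still be using it, then
 * publishes it with rcu_assign_pointer(); the outgoing miniq is stamped
 * via start_poll_synchronize_rcu() so the next swap can cheaply verify
 * that its grace period has elapsed before reuse.
 */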
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block)
{
miniqp->miniq1.block = block;
miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq)
{
miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);
| linux-master | net/sched/sch_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/act_meta_mark.c IFE skb->mark metadata module
*
* copyright Jamal Hadi Salim (2015)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
static int skbmark_encode(struct sk_buff *skb, void *skbdata,
struct tcf_meta_info *e)
{
u32 ifemark = skb->mark;
return ife_encode_meta_u32(ifemark, skbdata, e);
}
static int skbmark_decode(struct sk_buff *skb, void *data, u16 len)
{
u32 ifemark = *(u32 *)data;
skb->mark = ntohl(ifemark);
return 0;
}
static int skbmark_check(struct sk_buff *skb, struct tcf_meta_info *e)
{
return ife_check_meta_u32(skb->mark, e);
}
static struct tcf_meta_ops ife_skbmark_ops = {
.metaid = IFE_META_SKBMARK,
.metatype = NLA_U32,
.name = "skbmark",
.synopsis = "skb mark 32 bit metadata",
.check_presence = skbmark_check,
.encode = skbmark_encode,
.decode = skbmark_decode,
.get = ife_get_meta_u32,
.alloc = ife_alloc_meta_u32,
.release = ife_release_meta_gen,
.validate = ife_validate_meta_u32,
.owner = THIS_MODULE,
};
static int __init ifemark_init_module(void)
{
return register_ife_op(&ife_skbmark_ops);
}
static void __exit ifemark_cleanup_module(void)
{
unregister_ife_op(&ife_skbmark_ops);
}
module_init(ifemark_init_module);
module_exit(ifemark_cleanup_module);
MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE skb mark metadata module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IFE_META("skbmark");
| linux-master | net/sched/act_meta_mark.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* COMMON Applications Kept Enhanced (CAKE) discipline
*
* Copyright (C) 2014-2018 Jonathan Morton <[email protected]>
* Copyright (C) 2015-2018 Toke Høiland-Jørgensen <[email protected]>
* Copyright (C) 2014-2018 Dave Täht <[email protected]>
* Copyright (C) 2015-2018 Sebastian Moeller <[email protected]>
* (C) 2015-2018 Kevin Darbyshire-Bryant <[email protected]>
* Copyright (C) 2017-2018 Ryan Mounce <[email protected]>
*
* The CAKE Principles:
* (or, how to have your cake and eat it too)
*
* This is a combination of several shaping, AQM and FQ techniques into one
* easy-to-use package:
*
* - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
* equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
* eliminating the need for any sort of burst parameter (eg. token bucket
* depth). Burst support is limited to that necessary to overcome scheduling
* latency.
*
* - A Diffserv-aware priority queue, giving more priority to certain classes,
* up to a specified fraction of bandwidth. Above that bandwidth threshold,
* the priority is reduced to avoid starving other tins.
*
* - Each priority tin has a separate Flow Queue system, to isolate traffic
* flows from each other. This prevents a burst on one flow from increasing
* the delay to another. Flows are distributed to queues using a
* set-associative hash function.
*
* - Each queue is actively managed by Cobalt, which is a combination of the
* Codel and Blue AQM algorithms. This serves flows fairly, and signals
* congestion early via ECN (if available) and/or packet drops, to keep
* latency low. The codel parameters are auto-tuned based on the bandwidth
* setting, as is necessary at low bandwidths.
*
* The configuration parameters are kept deliberately simple for ease of use.
* Everything has sane defaults. Complete generality of configuration is *not*
* a goal.
*
* The priority queue operates according to a weighted DRR scheme, combined with
* a bandwidth tracker which reuses the shaper logic to detect which side of the
 * bandwidth sharing threshold the tin is operating on. This determines whether a
* priority-based weight (high) or a bandwidth-based weight (low) is used for
* that tin in the current pass.
*
* This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
* granted us permission to leverage.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
#include <linux/if_vlan.h>
#include <net/gso.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/flow_dissector.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif
#define CAKE_SET_WAYS (8)
#define CAKE_MAX_TINS (8)
#define CAKE_QUEUES (1024)
#define CAKE_FLOW_MASK 63
#define CAKE_FLOW_NAT_FLAG 64
/* struct cobalt_params - contains codel and blue parameters
* @interval: codel initial drop rate
* @target: maximum persistent sojourn time & blue update rate
* @mtu_time: serialisation delay of maximum-size packet
* @p_inc: increment of blue drop probability (0.32 fxp)
* @p_dec: decrement of blue drop probability (0.32 fxp)
*/
struct cobalt_params {
u64 interval;
u64 target;
u64 mtu_time;
u32 p_inc;
u32 p_dec;
};
/* struct cobalt_vars - contains codel and blue variables
* @count: codel dropping frequency
* @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
* @drop_next: time to drop next packet, or when we dropped last
* @blue_timer: Blue time to next drop
* @p_drop: BLUE drop probability (0.32 fxp)
* @dropping: set if in dropping state
* @ecn_marked: set if marked
*/
struct cobalt_vars {
u32 count;
u32 rec_inv_sqrt;
ktime_t drop_next;
ktime_t blue_timer;
u32 p_drop;
bool dropping;
bool ecn_marked;
};
enum {
CAKE_SET_NONE = 0,
CAKE_SET_SPARSE,
CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
CAKE_SET_BULK,
CAKE_SET_DECAYING
};
struct cake_flow {
/* this stuff is all needed per-flow at dequeue time */
struct sk_buff *head;
struct sk_buff *tail;
struct list_head flowchain;
s32 deficit;
u32 dropped;
struct cobalt_vars cvars;
u16 srchost; /* index into cake_host table */
u16 dsthost;
u8 set;
}; /* please try to keep this structure <= 64 bytes */
struct cake_host {
u32 srchost_tag;
u32 dsthost_tag;
u16 srchost_bulk_flow_count;
u16 dsthost_bulk_flow_count;
};
struct cake_heap_entry {
u16 t:3, b:10;
};
struct cake_tin_data {
struct cake_flow flows[CAKE_QUEUES];
u32 backlogs[CAKE_QUEUES];
u32 tags[CAKE_QUEUES]; /* for set association */
u16 overflow_idx[CAKE_QUEUES];
struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
u16 flow_quantum;
struct cobalt_params cparams;
u32 drop_overlimit;
u16 bulk_flow_count;
u16 sparse_flow_count;
u16 decaying_flow_count;
u16 unresponsive_flow_count;
u32 max_skblen;
struct list_head new_flows;
struct list_head old_flows;
struct list_head decaying_flows;
/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
ktime_t time_next_packet;
u64 tin_rate_ns;
u64 tin_rate_bps;
u16 tin_rate_shft;
u16 tin_quantum;
s32 tin_deficit;
u32 tin_backlog;
u32 tin_dropped;
u32 tin_ecn_mark;
u32 packets;
u64 bytes;
u32 ack_drops;
/* moving averages */
u64 avge_delay;
u64 peak_delay;
u64 base_delay;
/* hash function stats */
u32 way_directs;
u32 way_hits;
u32 way_misses;
u32 way_collisions;
}; /* number of tins is small, so size of this struct doesn't matter much */
struct cake_sched_data {
struct tcf_proto __rcu *filter_list; /* optional external classifier */
struct tcf_block *block;
struct cake_tin_data *tins;
struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
u16 overflow_timeout;
u16 tin_cnt;
u8 tin_mode;
u8 flow_mode;
u8 ack_filter;
u8 atm_mode;
u32 fwmark_mask;
u16 fwmark_shft;
/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
u16 rate_shft;
ktime_t time_next_packet;
ktime_t failsafe_next_packet;
u64 rate_ns;
u64 rate_bps;
u16 rate_flags;
s16 rate_overhead;
u16 rate_mpu;
u64 interval;
u64 target;
/* resource tracking */
u32 buffer_used;
u32 buffer_max_used;
u32 buffer_limit;
u32 buffer_config_limit;
/* indices for dequeue */
u16 cur_tin;
u16 cur_flow;
struct qdisc_watchdog watchdog;
const u8 *tin_index;
const u8 *tin_order;
/* bandwidth capacity estimate */
ktime_t last_packet_time;
ktime_t avg_window_begin;
u64 avg_packet_interval;
u64 avg_window_bytes;
u64 avg_peak_bandwidth;
ktime_t last_reconfig_time;
/* packet length stats */
u32 avg_netoff;
u16 max_netlen;
u16 max_adjlen;
u16 min_netlen;
u16 min_adjlen;
};
enum {
CAKE_FLAG_OVERHEAD = BIT(0),
CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
CAKE_FLAG_INGRESS = BIT(2),
CAKE_FLAG_WASH = BIT(3),
CAKE_FLAG_SPLIT_GSO = BIT(4)
};
/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
* obtain the best features of each. Codel is excellent on flows which
* respond to congestion signals in a TCP-like way. BLUE is more effective on
* unresponsive flows.
*/
struct cobalt_skb_cb {
ktime_t enqueue_time;
u32 adjusted_len;
};
static u64 us_to_ns(u64 us)
{
return us * NSEC_PER_USEC;
}
static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
{
qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
}
static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
{
return get_cobalt_cb(skb)->enqueue_time;
}
static void cobalt_set_enqueue_time(struct sk_buff *skb,
ktime_t now)
{
get_cobalt_cb(skb)->enqueue_time = now;
}
static u16 quantum_div[CAKE_QUEUES + 1] = {0};
/* Diffserv lookup tables */
static const u8 precedence[] = {
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7,
};
static const u8 diffserv8[] = {
2, 0, 1, 2, 4, 2, 2, 2,
1, 2, 1, 2, 1, 2, 1, 2,
5, 2, 4, 2, 4, 2, 4, 2,
3, 2, 3, 2, 3, 2, 3, 2,
6, 2, 3, 2, 3, 2, 3, 2,
6, 2, 2, 2, 6, 2, 6, 2,
7, 2, 2, 2, 2, 2, 2, 2,
7, 2, 2, 2, 2, 2, 2, 2,
};
static const u8 diffserv4[] = {
0, 1, 0, 0, 2, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
2, 0, 2, 0, 2, 0, 2, 0,
2, 0, 2, 0, 2, 0, 2, 0,
3, 0, 2, 0, 2, 0, 2, 0,
3, 0, 0, 0, 3, 0, 3, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
};
static const u8 diffserv3[] = {
0, 1, 0, 0, 2, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 2, 0, 2, 0,
2, 0, 0, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0,
};
static const u8 besteffort[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
/* tin priority order for stats dumping */
static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};
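/* Worked example of the lookups above (illustrative): in the default
* diffserv3 mode, EF (DSCP 46) maps via diffserv3[46] == 2 into the
* latency-sensitive tin, LE (DSCP 1) maps via diffserv3[1] == 1 into
* the low-priority tin, and DF (DSCP 0) maps to tin 0, best effort.
* bulk_order[] then makes stats dumps list the bulk tin first.
*/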
#define REC_INV_SQRT_CACHE (16)
static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
*
* Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
*/
static void cobalt_newton_step(struct cobalt_vars *vars)
{
u32 invsqrt, invsqrt2;
u64 val;
invsqrt = vars->rec_inv_sqrt;
invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
val = (3LL << 32) - ((u64)vars->count * invsqrt2);
val >>= 2; /* avoid overflow in following multiply */
val = (val * invsqrt) >> (32 - 2 + 1);
vars->rec_inv_sqrt = val;
}
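/* Illustrative check of the step above at the Newton fixed point: with
* count == 4 and rec_inv_sqrt == 0x80000000 (0.5 in Q0.32), invsqrt2 ==
* 0x40000000 (0.25), val == (3LL << 32) - (1LL << 32), and after the
* >> 2 and the final multiply/shift the result is 0x80000000 again,
* i.e. 1/sqrt(4) == 0.5 is preserved exactly.
*/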
static void cobalt_invsqrt(struct cobalt_vars *vars)
{
if (vars->count < REC_INV_SQRT_CACHE)
vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
else
cobalt_newton_step(vars);
}
/* There is a big difference in timing between the accurate values placed in
* the cache and the approximations given by a single Newton step for small
* count values, particularly when stepping from count 1 to 2 or vice versa.
* Above 16, a single Newton step gives sufficient accuracy in either
* direction, given the precision stored.
*
* The magnitude of the error when stepping up to count 2 is such as to give
* the value that *should* have been produced at count 4.
*/
static void cobalt_cache_init(void)
{
struct cobalt_vars v;
memset(&v, 0, sizeof(v));
v.rec_inv_sqrt = ~0U;
cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
cobalt_newton_step(&v);
cobalt_newton_step(&v);
cobalt_newton_step(&v);
cobalt_newton_step(&v);
cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
}
}
static void cobalt_vars_init(struct cobalt_vars *vars)
{
memset(vars, 0, sizeof(*vars));
if (!cobalt_rec_inv_sqrt_cache[0]) {
cobalt_cache_init();
cobalt_rec_inv_sqrt_cache[0] = ~0;
}
}
/* CoDel control_law is t + interval/sqrt(count)
* We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
* both sqrt() and divide operation.
*/
static ktime_t cobalt_control(ktime_t t,
u64 interval,
u32 rec_inv_sqrt)
{
return ktime_add_ns(t, reciprocal_scale(interval,
rec_inv_sqrt));
}
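/* Illustrative: reciprocal_scale(val, ep_ro) computes
* ((u64)val * ep_ro) >> 32, so the line above schedules the next
* signal interval/sqrt(count) after t; e.g. with interval == 100ms and
* count == 4 (rec_inv_sqrt ~= 0.5 in Q0.32) that is t + 50ms.
*/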
/* Call this when a packet had to be dropped due to queue overflow. Returns
* true if the BLUE state was quiescent before but active after this call.
*/
static bool cobalt_queue_full(struct cobalt_vars *vars,
struct cobalt_params *p,
ktime_t now)
{
bool up = false;
if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
up = !vars->p_drop;
vars->p_drop += p->p_inc;
if (vars->p_drop < p->p_inc)
vars->p_drop = ~0;
vars->blue_timer = now;
}
vars->dropping = true;
vars->drop_next = now;
if (!vars->count)
vars->count = 1;
return up;
}
/* Call this when the queue was serviced but turned out to be empty. Returns
* true if the BLUE state was active before but quiescent after this call.
*/
static bool cobalt_queue_empty(struct cobalt_vars *vars,
struct cobalt_params *p,
ktime_t now)
{
bool down = false;
if (vars->p_drop &&
ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
if (vars->p_drop < p->p_dec)
vars->p_drop = 0;
else
vars->p_drop -= p->p_dec;
vars->blue_timer = now;
down = !vars->p_drop;
}
vars->dropping = false;
if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
vars->count--;
cobalt_invsqrt(vars);
vars->drop_next = cobalt_control(vars->drop_next,
p->interval,
vars->rec_inv_sqrt);
}
return down;
}
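/* Illustrative BLUE arithmetic: p_drop is a Q0.32 drop probability, and
* cake_set_rate() uses p_inc == 1 << 24 and p_dec == 1 << 20, so each
* queue-full event raises the probability by 1/256 and each empty
* event lowers it by 1/4096, rate-limited by blue_timer to once per
* target interval in each direction.
*/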
/* Call this with a freshly dequeued packet for possible congestion marking.
* Returns true as an instruction to drop the packet, false for delivery.
*/
static bool cobalt_should_drop(struct cobalt_vars *vars,
struct cobalt_params *p,
ktime_t now,
struct sk_buff *skb,
u32 bulk_flows)
{
bool next_due, over_target, drop = false;
ktime_t schedule;
u64 sojourn;
/* The 'schedule' variable records, in its sign, whether 'now' is before or
* after 'drop_next'. This allows 'drop_next' to be updated before the next
* scheduling decision is actually branched, without destroying that
* information. Similarly, the first 'schedule' value calculated is preserved
* in the boolean 'next_due'.
*
* As for 'drop_next', we take advantage of the fact that 'interval' is both
* the delay between first exceeding 'target' and the first signalling event,
* *and* the scaling factor for the signalling frequency. It's therefore very
* natural to use a single mechanism for both purposes, and eliminates a
* significant amount of reference Codel's spaghetti code. To help with this,
* both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
* as possible to 1.0 in fixed-point.
*/
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
sojourn > p->mtu_time * bulk_flows * 2 &&
sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
vars->ecn_marked = false;
if (over_target) {
if (!vars->dropping) {
vars->dropping = true;
vars->drop_next = cobalt_control(now,
p->interval,
vars->rec_inv_sqrt);
}
if (!vars->count)
vars->count = 1;
} else if (vars->dropping) {
vars->dropping = false;
}
if (next_due && vars->dropping) {
/* Use ECN mark if possible, otherwise drop */
drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
vars->count++;
if (!vars->count)
vars->count--;
cobalt_invsqrt(vars);
vars->drop_next = cobalt_control(vars->drop_next,
p->interval,
vars->rec_inv_sqrt);
schedule = ktime_sub(now, vars->drop_next);
} else {
while (next_due) {
vars->count--;
cobalt_invsqrt(vars);
vars->drop_next = cobalt_control(vars->drop_next,
p->interval,
vars->rec_inv_sqrt);
schedule = ktime_sub(now, vars->drop_next);
next_due = vars->count && ktime_to_ns(schedule) >= 0;
}
}
/* Simple BLUE implementation. Lack of ECN is deliberate. */
if (vars->p_drop)
drop |= (get_random_u32() < vars->p_drop);
/* Overload the drop_next field as an activity timeout */
if (!vars->count)
vars->drop_next = ktime_add_ns(now, p->interval);
else if (ktime_to_ns(schedule) > 0 && !drop)
vars->drop_next = now;
return drop;
}
static bool cake_update_flowkeys(struct flow_keys *keys,
const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
struct nf_conntrack_tuple tuple = {};
bool rev = !skb->_nfct, upd = false;
__be32 ip;
if (skb_protocol(skb, true) != htons(ETH_P_IP))
return false;
if (!nf_ct_get_tuple_skb(&tuple, skb))
return false;
ip = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
if (ip != keys->addrs.v4addrs.src) {
keys->addrs.v4addrs.src = ip;
upd = true;
}
ip = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
if (ip != keys->addrs.v4addrs.dst) {
keys->addrs.v4addrs.dst = ip;
upd = true;
}
if (keys->ports.ports) {
__be16 port;
port = rev ? tuple.dst.u.all : tuple.src.u.all;
if (port != keys->ports.src) {
keys->ports.src = port;
upd = true;
}
port = rev ? tuple.src.u.all : tuple.dst.u.all;
if (port != keys->ports.dst) {
keys->ports.dst = port;
upd = true;
}
}
return upd;
#else
return false;
#endif
}
/* Cake has several subtle multiple bit settings. In these cases you
* would be matching triple isolate mode as well.
*/
static bool cake_dsrc(int flow_mode)
{
return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
}
static bool cake_ddst(int flow_mode)
{
return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}
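/* For example, CAKE_FLOW_TRIPLE (7) contains both CAKE_FLOW_DUAL_SRC
* (5) and CAKE_FLOW_DUAL_DST (6) as bit subsets, so cake_dsrc() and
* cake_ddst() both return true in triple-isolate mode, while plain
* CAKE_FLOW_FLOWS (4) matches neither.
*/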
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
int flow_mode, u16 flow_override, u16 host_override)
{
bool hash_flows = (!flow_override && !!(flow_mode & CAKE_FLOW_FLOWS));
bool hash_hosts = (!host_override && !!(flow_mode & CAKE_FLOW_HOSTS));
bool nat_enabled = !!(flow_mode & CAKE_FLOW_NAT_FLAG);
u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
u16 reduced_hash, srchost_idx, dsthost_idx;
struct flow_keys keys, host_keys;
bool use_skbhash = skb->l4_hash;
if (unlikely(flow_mode == CAKE_FLOW_NONE))
return 0;
/* If both overrides are set, or we can use the SKB hash and nat mode is
* disabled, we can skip packet dissection entirely. If nat mode is
* enabled there's another check below after doing the conntrack lookup.
*/
if ((!hash_flows || (use_skbhash && !nat_enabled)) && !hash_hosts)
goto skip_hash;
skb_flow_dissect_flow_keys(skb, &keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
/* Don't use the SKB hash if we change the lookup keys from conntrack */
if (nat_enabled && cake_update_flowkeys(&keys, skb))
use_skbhash = false;
/* If we can still use the SKB hash and don't need the host hash, we can
* skip the rest of the hashing procedure
*/
if (use_skbhash && !hash_hosts)
goto skip_hash;
/* flow_hash_from_keys() sorts the addresses by value, so we have
* to preserve their order in a separate data structure to treat
* src and dst host addresses as independently selectable.
*/
host_keys = keys;
host_keys.ports.ports = 0;
host_keys.basic.ip_proto = 0;
host_keys.keyid.keyid = 0;
host_keys.tags.flow_label = 0;
switch (host_keys.control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
host_keys.addrs.v4addrs.src = 0;
dsthost_hash = flow_hash_from_keys(&host_keys);
host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
host_keys.addrs.v4addrs.dst = 0;
srchost_hash = flow_hash_from_keys(&host_keys);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
memset(&host_keys.addrs.v6addrs.src, 0,
sizeof(host_keys.addrs.v6addrs.src));
dsthost_hash = flow_hash_from_keys(&host_keys);
host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
memset(&host_keys.addrs.v6addrs.dst, 0,
sizeof(host_keys.addrs.v6addrs.dst));
srchost_hash = flow_hash_from_keys(&host_keys);
break;
default:
dsthost_hash = 0;
srchost_hash = 0;
}
/* This *must* be after the above switch, since as a
* side-effect it sorts the src and dst addresses.
*/
if (hash_flows && !use_skbhash)
flow_hash = flow_hash_from_keys(&keys);
skip_hash:
if (flow_override)
flow_hash = flow_override - 1;
else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
flow_hash = skb->hash;
if (host_override) {
dsthost_hash = host_override - 1;
srchost_hash = host_override - 1;
}
if (!(flow_mode & CAKE_FLOW_FLOWS)) {
if (flow_mode & CAKE_FLOW_SRC_IP)
flow_hash ^= srchost_hash;
if (flow_mode & CAKE_FLOW_DST_IP)
flow_hash ^= dsthost_hash;
}
reduced_hash = flow_hash % CAKE_QUEUES;
/* set-associative hashing */
/* fast path if no hash collision (direct lookup succeeds) */
if (likely(q->tags[reduced_hash] == flow_hash &&
q->flows[reduced_hash].set)) {
q->way_directs++;
} else {
u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
u32 outer_hash = reduced_hash - inner_hash;
bool allocate_src = false;
bool allocate_dst = false;
u32 i, k;
/* check if any active queue in the set is reserved for
* this flow.
*/
for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (q->tags[outer_hash + k] == flow_hash) {
if (i)
q->way_hits++;
if (!q->flows[outer_hash + k].set) {
/* need to increment host refcnts */
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
}
goto found;
}
}
/* no queue is reserved for this flow, look for an
* empty one.
*/
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (!q->flows[outer_hash + k].set) {
q->way_misses++;
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
goto found;
}
}
/* With no empty queues, default to the original
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
}
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
found:
/* reserve queue for future packets in same flow */
reduced_hash = outer_hash + k;
q->tags[reduced_hash] = flow_hash;
if (allocate_src) {
srchost_idx = srchost_hash % CAKE_QUEUES;
inner_hash = srchost_idx % CAKE_SET_WAYS;
outer_hash = srchost_idx - inner_hash;
for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (q->hosts[outer_hash + k].srchost_tag ==
srchost_hash)
goto found_src;
}
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
break;
}
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
srchost_idx = outer_hash + k;
if (q->flows[reduced_hash].set == CAKE_SET_BULK)
q->hosts[srchost_idx].srchost_bulk_flow_count++;
q->flows[reduced_hash].srchost = srchost_idx;
}
if (allocate_dst) {
dsthost_idx = dsthost_hash % CAKE_QUEUES;
inner_hash = dsthost_idx % CAKE_SET_WAYS;
outer_hash = dsthost_idx - inner_hash;
for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (q->hosts[outer_hash + k].dsthost_tag ==
dsthost_hash)
goto found_dst;
}
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
break;
}
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
dsthost_idx = outer_hash + k;
if (q->flows[reduced_hash].set == CAKE_SET_BULK)
q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q->flows[reduced_hash].dsthost = dsthost_idx;
}
}
return reduced_hash;
}
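/* Worked example of the set-associative lookup above (illustrative):
* with CAKE_QUEUES == 1024 and CAKE_SET_WAYS == 8, flow_hash == 0x12345
* gives reduced_hash == 837, inner_hash == 5 and outer_hash == 832, so
* the candidate set is queues 832..839, searched from 837 and wrapping
* within the set.
*/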
/* helper functions: might be changed when/if skb uses a standard list_head */
/* remove one skb from head of slot queue */
static struct sk_buff *dequeue_head(struct cake_flow *flow)
{
struct sk_buff *skb = flow->head;
if (skb) {
flow->head = skb->next;
skb_mark_not_on_list(skb);
}
return skb;
}
/* add skb to flow queue (tail add) */
static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
{
if (!flow->head)
flow->head = skb;
else
flow->tail->next = skb;
flow->tail = skb;
skb->next = NULL;
}
static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
struct ipv6hdr *buf)
{
unsigned int offset = skb_network_offset(skb);
struct iphdr *iph;
iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
if (!iph)
return NULL;
if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
return skb_header_pointer(skb, offset + iph->ihl * 4,
sizeof(struct ipv6hdr), buf);
else if (iph->version == 4)
return iph;
else if (iph->version == 6)
return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
buf);
return NULL;
}
static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
void *buf, unsigned int bufsize)
{
unsigned int offset = skb_network_offset(skb);
const struct ipv6hdr *ipv6h;
const struct tcphdr *tcph;
const struct iphdr *iph;
struct ipv6hdr _ipv6h;
struct tcphdr _tcph;
ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
if (!ipv6h)
return NULL;
if (ipv6h->version == 4) {
iph = (struct iphdr *)ipv6h;
offset += iph->ihl * 4;
/* special-case 6in4 tunnelling, as that is a common way to get
* v6 connectivity in the home
*/
if (iph->protocol == IPPROTO_IPV6) {
ipv6h = skb_header_pointer(skb, offset,
sizeof(_ipv6h), &_ipv6h);
if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
return NULL;
offset += sizeof(struct ipv6hdr);
} else if (iph->protocol != IPPROTO_TCP) {
return NULL;
}
} else if (ipv6h->version == 6) {
if (ipv6h->nexthdr != IPPROTO_TCP)
return NULL;
offset += sizeof(struct ipv6hdr);
} else {
return NULL;
}
tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (!tcph || tcph->doff < 5)
return NULL;
return skb_header_pointer(skb, offset,
min(__tcp_hdrlen(tcph), bufsize), buf);
}
static const void *cake_get_tcpopt(const struct tcphdr *tcph,
int code, int *oplen)
{
/* inspired by tcp_parse_options in tcp_input.c */
int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
const u8 *ptr = (const u8 *)(tcph + 1);
while (length > 0) {
int opcode = *ptr++;
int opsize;
if (opcode == TCPOPT_EOL)
break;
if (opcode == TCPOPT_NOP) {
length--;
continue;
}
if (length < 2)
break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
if (opcode == code) {
*oplen = opsize;
return ptr;
}
ptr += opsize - 2;
length -= opsize;
}
return NULL;
}
/* Compare two SACK sequences. A sequence is considered greater if it SACKs more
* bytes than the other. In the case where both sequences ACK bytes that the
* other doesn't, A is considered greater. DSACKs in A also make A be
* considered greater.
*
* @return -1, 0 or 1 as normal compare functions
*/
static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
const struct tcphdr *tcph_b)
{
const struct tcp_sack_block_wire *sack_a, *sack_b;
u32 ack_seq_a = ntohl(tcph_a->ack_seq);
u32 bytes_a = 0, bytes_b = 0;
int oplen_a, oplen_b;
bool first = true;
sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
/* pointers point to option contents */
oplen_a -= TCPOLEN_SACK_BASE;
oplen_b -= TCPOLEN_SACK_BASE;
if (sack_a && oplen_a >= sizeof(*sack_a) &&
(!sack_b || oplen_b < sizeof(*sack_b)))
return -1;
else if (sack_b && oplen_b >= sizeof(*sack_b) &&
(!sack_a || oplen_a < sizeof(*sack_a)))
return 1;
else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
(!sack_b || oplen_b < sizeof(*sack_b)))
return 0;
while (oplen_a >= sizeof(*sack_a)) {
const struct tcp_sack_block_wire *sack_tmp = sack_b;
u32 start_a = get_unaligned_be32(&sack_a->start_seq);
u32 end_a = get_unaligned_be32(&sack_a->end_seq);
int oplen_tmp = oplen_b;
bool found = false;
/* DSACK; always considered greater to prevent dropping */
if (before(start_a, ack_seq_a))
return -1;
bytes_a += end_a - start_a;
while (oplen_tmp >= sizeof(*sack_tmp)) {
u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
/* first time through we count the total size */
if (first)
bytes_b += end_b - start_b;
if (!after(start_b, start_a) && !before(end_b, end_a)) {
found = true;
if (!first)
break;
}
oplen_tmp -= sizeof(*sack_tmp);
sack_tmp++;
}
if (!found)
return -1;
oplen_a -= sizeof(*sack_a);
sack_a++;
first = false;
}
/* If we made it this far, all ranges SACKed by A are covered by B, so
* either the SACKs are equal, or B SACKs more bytes.
*/
return bytes_b > bytes_a ? 1 : 0;
}
static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
u32 *tsval, u32 *tsecr)
{
const u8 *ptr;
int opsize;
ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
if (ptr && opsize == TCPOLEN_TIMESTAMP) {
*tsval = get_unaligned_be32(ptr);
*tsecr = get_unaligned_be32(ptr + 4);
}
}
static bool cake_tcph_may_drop(const struct tcphdr *tcph,
u32 tstamp_new, u32 tsecr_new)
{
/* inspired by tcp_parse_options in tcp_input.c */
int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
const u8 *ptr = (const u8 *)(tcph + 1);
u32 tstamp, tsecr;
/* 3 reserved flags must be unset to avoid future breakage
* ACK must be set
* ECE/CWR are handled separately
* All other flags URG/PSH/RST/SYN/FIN must be unset
* 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
* 0x00C00000 = CWR/ECE (handled separately)
* 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
*/
if (((tcp_flag_word(tcph) &
cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
return false;
while (length > 0) {
int opcode = *ptr++;
int opsize;
if (opcode == TCPOPT_EOL)
break;
if (opcode == TCPOPT_NOP) {
length--;
continue;
}
if (length < 2)
break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
switch (opcode) {
case TCPOPT_MD5SIG: /* doesn't influence state */
break;
case TCPOPT_SACK: /* stricter checking performed later */
if (opsize % 8 != 2)
return false;
break;
case TCPOPT_TIMESTAMP:
/* only drop timestamps lower than new */
if (opsize != TCPOLEN_TIMESTAMP)
return false;
tstamp = get_unaligned_be32(ptr);
tsecr = get_unaligned_be32(ptr + 4);
if (after(tstamp, tstamp_new) ||
after(tsecr, tsecr_new))
return false;
break;
case TCPOPT_MSS: /* these should only be set on SYN */
case TCPOPT_WINDOW:
case TCPOPT_SACK_PERM:
case TCPOPT_FASTOPEN:
case TCPOPT_EXP:
default: /* don't drop if any unknown options are present */
return false;
}
ptr += opsize - 2;
length -= opsize;
}
return true;
}
static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
struct cake_flow *flow)
{
bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
struct sk_buff *skb_check, *skb_prev = NULL;
const struct ipv6hdr *ipv6h, *ipv6h_check;
unsigned char _tcph[64], _tcph_check[64];
const struct tcphdr *tcph, *tcph_check;
const struct iphdr *iph, *iph_check;
struct ipv6hdr _iph, _iph_check;
const struct sk_buff *skb;
int seglen, num_found = 0;
u32 tstamp = 0, tsecr = 0;
__be32 elig_flags = 0;
int sack_comp;
/* no other possible ACKs to filter */
if (flow->head == flow->tail)
return NULL;
skb = flow->tail;
tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
iph = cake_get_iphdr(skb, &_iph);
if (!tcph)
return NULL;
cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
/* The 'triggering' packet need only have the ACK flag set.
* Also check that SYN is not set, as there won't be any previous ACKs.
*/
if ((tcp_flag_word(tcph) &
(TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
return NULL;
/* the 'triggering' ACK is at the tail of the queue, we have already
* returned if it is the only packet in the flow. loop through the rest
* of the queue looking for pure ACKs with the same 5-tuple as the
* triggering one.
*/
for (skb_check = flow->head;
skb_check && skb_check != skb;
skb_prev = skb_check, skb_check = skb_check->next) {
iph_check = cake_get_iphdr(skb_check, &_iph_check);
tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
sizeof(_tcph_check));
/* only TCP packets with matching 5-tuple are eligible, and only
* drop safe headers
*/
if (!tcph_check || iph->version != iph_check->version ||
tcph_check->source != tcph->source ||
tcph_check->dest != tcph->dest)
continue;
if (iph_check->version == 4) {
if (iph_check->saddr != iph->saddr ||
iph_check->daddr != iph->daddr)
continue;
seglen = iph_totlen(skb, iph_check) -
(4 * iph_check->ihl);
} else if (iph_check->version == 6) {
ipv6h = (struct ipv6hdr *)iph;
ipv6h_check = (struct ipv6hdr *)iph_check;
if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
continue;
seglen = ntohs(ipv6h_check->payload_len);
} else {
WARN_ON(1); /* shouldn't happen */
continue;
}
/* If the ECE/CWR flags changed from the previous eligible
* packet in the same flow, we should no longer be dropping that
* previous packet as this would lose information.
*/
if (elig_ack && (tcp_flag_word(tcph_check) &
(TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
elig_ack = NULL;
elig_ack_prev = NULL;
num_found--;
}
/* Check TCP options and flags, don't drop ACKs with segment
* data, and don't drop ACKs with a higher cumulative ACK
* counter than the triggering packet. Check ACK seqno here to
* avoid parsing SACK options of packets we are going to exclude
* anyway.
*/
if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
(seglen - __tcp_hdrlen(tcph_check)) != 0 ||
after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
continue;
/* Check SACK options. The triggering packet must SACK more data
* than the ACK under consideration, or SACK the same range but
* have a larger cumulative ACK counter. The latter is a
* pathological case, but is contained in the following check
* anyway, just to be safe.
*/
sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
if (sack_comp < 0 ||
(ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
sack_comp == 0))
continue;
/* At this point we have found an eligible pure ACK to drop; if
* we are in aggressive mode, we are done. Otherwise, keep
* searching unless this is the second eligible ACK we
* found.
*
* Since we want to drop ACK closest to the head of the queue,
* save the first eligible ACK we find, even if we need to loop
* again.
*/
if (!elig_ack) {
elig_ack = skb_check;
elig_ack_prev = skb_prev;
elig_flags = (tcp_flag_word(tcph_check)
& (TCP_FLAG_ECE | TCP_FLAG_CWR));
}
if (num_found++ > 0)
goto found;
}
/* We made it through the queue without finding two eligible ACKs. If
* we found a single eligible ACK we can drop it in aggressive mode if
* we can guarantee that this does not interfere with ECN flag
* information. We ensure this by dropping it only if the enqueued
* packet is consecutive with the eligible ACK, and their flags match.
*/
if (elig_ack && aggressive && elig_ack->next == skb &&
(elig_flags == (tcp_flag_word(tcph) &
(TCP_FLAG_ECE | TCP_FLAG_CWR))))
goto found;
return NULL;
found:
if (elig_ack_prev)
elig_ack_prev->next = elig_ack->next;
else
flow->head = elig_ack->next;
skb_mark_not_on_list(elig_ack);
return elig_ack;
}
static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
{
avg -= avg >> shift;
avg += sample >> shift;
return avg;
}
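/* Illustrative: cake_ewma() is an exponential moving average with
* alpha == 2^-shift; e.g. with shift == 8, avg == 1000, sample == 1256:
* 1000 - (1000 >> 8) + (1256 >> 8) == 1000 - 3 + 4 == 1001 (both
* shifts truncate).
*/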
static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
{
if (q->rate_flags & CAKE_FLAG_OVERHEAD)
len -= off;
if (q->max_netlen < len)
q->max_netlen = len;
if (q->min_netlen > len)
q->min_netlen = len;
len += q->rate_overhead;
if (len < q->rate_mpu)
len = q->rate_mpu;
if (q->atm_mode == CAKE_ATM_ATM) {
len += 47;
len /= 48;
len *= 53;
} else if (q->atm_mode == CAKE_ATM_PTM) {
/* Add one byte per 64 bytes or part thereof.
* This is conservative and easier to calculate than the
* precise value.
*/
len += (len + 63) / 64;
}
if (q->max_adjlen < len)
q->max_adjlen = len;
if (q->min_adjlen > len)
q->min_adjlen = len;
return len;
}
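/* Worked example (illustrative): in CAKE_ATM_ATM mode a 1500 byte
* packet becomes (1500 + 47) / 48 == 32 cells of 53 bytes == 1696
* bytes on the wire; in CAKE_ATM_PTM mode the same packet is charged
* 1500 + (1500 + 63) / 64 == 1524 bytes for 64b/65b encoding.
*/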
static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int hdr_len, last_len = 0;
u32 off = skb_network_offset(skb);
u32 len = qdisc_pkt_len(skb);
u16 segs = 1;
q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
if (!shinfo->gso_size)
return cake_calc_overhead(q, len, off);
/* borrowed from qdisc_pkt_len_init() */
hdr_len = skb_transport_offset(skb);
/* + transport layer */
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
SKB_GSO_TCPV6))) {
const struct tcphdr *th;
struct tcphdr _tcphdr;
th = skb_header_pointer(skb, hdr_len,
sizeof(_tcphdr), &_tcphdr);
if (likely(th))
hdr_len += __tcp_hdrlen(th);
} else {
struct udphdr _udphdr;
if (skb_header_pointer(skb, hdr_len,
sizeof(_udphdr), &_udphdr))
hdr_len += sizeof(struct udphdr);
}
if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
segs = DIV_ROUND_UP(skb->len - hdr_len,
shinfo->gso_size);
else
segs = shinfo->gso_segs;
len = shinfo->gso_size + hdr_len;
last_len = skb->len - shinfo->gso_size * (segs - 1);
return (cake_calc_overhead(q, len, off) * (segs - 1) +
cake_calc_overhead(q, last_len, off));
}
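/* Illustrative GSO accounting (assumed sizes): a 4396 byte aggregate
* with hdr_len == 52, gso_size == 1448 and gso_segs == 3 yields
* len == 1500 and last_len == 4396 - 2 * 1448 == 1500, so per-packet
* overhead is charged three times rather than once for the aggregate.
*/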
static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
{
struct cake_heap_entry ii = q->overflow_heap[i];
struct cake_heap_entry jj = q->overflow_heap[j];
q->overflow_heap[i] = jj;
q->overflow_heap[j] = ii;
q->tins[ii.t].overflow_idx[ii.b] = j;
q->tins[jj.t].overflow_idx[jj.b] = i;
}
static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
{
struct cake_heap_entry ii = q->overflow_heap[i];
return q->tins[ii.t].backlogs[ii.b];
}
static void cake_heapify(struct cake_sched_data *q, u16 i)
{
static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
u32 mb = cake_heap_get_backlog(q, i);
u32 m = i;
while (m < a) {
u32 l = m + m + 1;
u32 r = l + 1;
if (l < a) {
u32 lb = cake_heap_get_backlog(q, l);
if (lb > mb) {
m = l;
mb = lb;
}
}
if (r < a) {
u32 rb = cake_heap_get_backlog(q, r);
if (rb > mb) {
m = r;
mb = rb;
}
}
if (m != i) {
cake_heap_swap(q, i, m);
i = m;
} else {
break;
}
}
}
static void cake_heapify_up(struct cake_sched_data *q, u16 i)
{
while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
u16 p = (i - 1) >> 1;
u32 ib = cake_heap_get_backlog(q, i);
u32 pb = cake_heap_get_backlog(q, p);
if (ib > pb) {
cake_heap_swap(q, i, p);
i = p;
} else {
break;
}
}
}
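/* The overflow heap is an implicit binary max-heap keyed on per-queue
* backlog: node i has children 2i + 1 and 2i + 2 and parent
* (i - 1) >> 1, so e.g. node 5's parent is node 2 and its children are
* nodes 11 and 12. The root (index 0) is always the fattest queue.
*/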
static int cake_advance_shaper(struct cake_sched_data *q,
struct cake_tin_data *b,
struct sk_buff *skb,
ktime_t now, bool drop)
{
u32 len = get_cobalt_cb(skb)->adjusted_len;
/* charge packet bandwidth to this tin
* and to the global shaper.
*/
if (q->rate_ns) {
u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
u64 failsafe_dur = global_dur + (global_dur >> 1);
if (ktime_before(b->time_next_packet, now))
b->time_next_packet = ktime_add_ns(b->time_next_packet,
tin_dur);
else if (ktime_before(b->time_next_packet,
ktime_add_ns(now, tin_dur)))
b->time_next_packet = ktime_add_ns(now, tin_dur);
q->time_next_packet = ktime_add_ns(q->time_next_packet,
global_dur);
if (!drop)
q->failsafe_next_packet =
ktime_add_ns(q->failsafe_next_packet,
failsafe_dur);
}
return len;
}
static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
struct cake_sched_data *q = qdisc_priv(sch);
ktime_t now = ktime_get();
u32 idx = 0, tin = 0, len;
struct cake_heap_entry qq;
struct cake_tin_data *b;
struct cake_flow *flow;
struct sk_buff *skb;
if (!q->overflow_timeout) {
int i;
/* Build fresh max-heap */
for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
cake_heapify(q, i);
}
q->overflow_timeout = 65535;
/* select longest queue for pruning */
qq = q->overflow_heap[0];
tin = qq.t;
idx = qq.b;
b = &q->tins[tin];
flow = &b->flows[idx];
skb = dequeue_head(flow);
if (unlikely(!skb)) {
/* heap has gone wrong, rebuild it next time */
q->overflow_timeout = 0;
return idx + (tin << 16);
}
if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
b->unresponsive_flow_count++;
len = qdisc_pkt_len(skb);
q->buffer_used -= skb->truesize;
b->backlogs[idx] -= len;
b->tin_backlog -= len;
sch->qstats.backlog -= len;
qdisc_tree_reduce_backlog(sch, 1, len);
flow->dropped++;
b->tin_dropped++;
sch->qstats.drops++;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
__qdisc_drop(skb, to_free);
sch->q.qlen--;
cake_heapify(q, 0);
return idx + (tin << 16);
}
static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
const int offset = skb_network_offset(skb);
u16 *buf, buf_;
u8 dscp;
switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
if (unlikely(!buf))
return 0;
/* ToS is in the second byte of iphdr */
dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
if (wash && dscp) {
const int wlen = offset + sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen) ||
skb_try_make_writable(skb, wlen))
return 0;
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
}
return dscp;
case htons(ETH_P_IPV6):
buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
if (unlikely(!buf))
return 0;
/* Traffic class is in the first and second bytes of ipv6hdr */
dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
if (wash && dscp) {
const int wlen = offset + sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, wlen) ||
skb_try_make_writable(skb, wlen))
return 0;
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
}
return dscp;
case htons(ETH_P_ARP):
return 0x38; /* CS7 - Net Control */
default:
/* If there is no Diffserv field, treat as best-effort */
return 0;
}
}
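/* Illustrative: the DSCP is the upper six bits of the IPv4 ToS byte or
* IPv6 traffic class, hence the >> 2 above; e.g. ToS 0xb8 yields DSCP
* 0x2e (46, EF). Washing zeroes the DSCP bits but keeps the two ECN
* bits (INET_ECN_MASK) intact.
*/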
static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
struct sk_buff *skb)
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin, mark;
bool wash;
u8 dscp;
/* Tin selection: Default to diffserv-based selection, allow overriding
* using firewall marks or skb->priority. Call DSCP parsing early if
* wash is enabled, otherwise defer to below to skip unneeded parsing.
*/
mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
wash = !!(q->rate_flags & CAKE_FLAG_WASH);
if (wash)
dscp = cake_handle_diffserv(skb, wash);
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
else if (mark && mark <= q->tin_cnt)
tin = q->tin_order[mark - 1];
else if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
TC_H_MIN(skb->priority) <= q->tin_cnt)
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
else {
if (!wash)
dscp = cake_handle_diffserv(skb, wash);
tin = q->tin_index[dscp];
if (unlikely(tin >= q->tin_cnt))
tin = 0;
}
return &q->tins[tin];
}
static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
struct sk_buff *skb, int flow_mode, int *qerr)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct tcf_proto *filter;
struct tcf_result res;
u16 flow = 0, host = 0;
int result;
filter = rcu_dereference_bh(q->filter_list);
if (!filter)
goto hash;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tcf_classify(skb, NULL, filter, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
fallthrough;
case TC_ACT_SHOT:
return 0;
}
#endif
if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
flow = TC_H_MIN(res.classid);
if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
host = TC_H_MAJ(res.classid) >> 16;
}
hash:
*t = cake_select_tin(sch, skb);
return cake_hash(*t, skb, flow_mode, flow, host) + 1;
}
static void cake_reconfigure(struct Qdisc *sch);
static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct cake_sched_data *q = qdisc_priv(sch);
int len = qdisc_pkt_len(skb);
int ret;
struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
u32 idx;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
if (idx == 0) {
if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
__qdisc_drop(skb, to_free);
return ret;
}
idx--;
flow = &b->flows[idx];
/* ensure shaper state isn't stale */
if (!b->tin_backlog) {
if (ktime_before(b->time_next_packet, now))
b->time_next_packet = now;
if (!sch->q.qlen) {
if (ktime_before(q->time_next_packet, now)) {
q->failsafe_next_packet = now;
q->time_next_packet = now;
} else if (ktime_after(q->time_next_packet, now) &&
ktime_after(q->failsafe_next_packet, now)) {
u64 next =
min(ktime_to_ns(q->time_next_packet),
ktime_to_ns(
q->failsafe_next_packet));
sch->qstats.overlimits++;
qdisc_watchdog_schedule_ns(&q->watchdog, next);
}
}
}
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int slen = 0, numsegs = 0;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
return qdisc_drop(skb, sch, to_free);
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
segs);
flow_queue_add(flow, segs);
sch->q.qlen++;
numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
b->packets++;
}
/* stats */
b->bytes += slen;
b->backlogs[idx] += slen;
b->tin_backlog += slen;
sch->qstats.backlog += slen;
q->avg_window_bytes += slen;
qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
consume_skb(skb);
} else {
/* not splitting */
cobalt_set_enqueue_time(skb, now);
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
flow_queue_add(flow, skb);
if (q->ack_filter)
ack = cake_ack_filter(q, flow);
if (ack) {
b->ack_drops++;
sch->qstats.drops++;
b->bytes += qdisc_pkt_len(ack);
len -= qdisc_pkt_len(ack);
q->buffer_used += skb->truesize - ack->truesize;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
consume_skb(ack);
} else {
sch->q.qlen++;
q->buffer_used += skb->truesize;
}
/* stats */
b->packets++;
b->bytes += len;
b->backlogs[idx] += len;
b->tin_backlog += len;
sch->qstats.backlog += len;
q->avg_window_bytes += len;
}
if (q->overflow_timeout)
cake_heapify_up(q, b->overflow_idx[idx]);
/* incoming bandwidth capacity estimate */
if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
u64 packet_interval =
ktime_to_ns(ktime_sub(now, q->last_packet_time));
if (packet_interval > NSEC_PER_SEC)
packet_interval = NSEC_PER_SEC;
/* filter out short-term bursts, eg. wifi aggregation */
q->avg_packet_interval =
cake_ewma(q->avg_packet_interval,
packet_interval,
(packet_interval > q->avg_packet_interval ?
2 : 8));
q->last_packet_time = now;
if (packet_interval > q->avg_packet_interval) {
u64 window_interval =
ktime_to_ns(ktime_sub(now,
q->avg_window_begin));
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
b = div64_u64(b, window_interval);
q->avg_peak_bandwidth =
cake_ewma(q->avg_peak_bandwidth, b,
b > q->avg_peak_bandwidth ? 2 : 8);
q->avg_window_bytes = 0;
q->avg_window_begin = now;
if (ktime_after(now,
ktime_add_ms(q->last_reconfig_time,
250))) {
q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
cake_reconfigure(sch);
}
}
} else {
q->avg_window_bytes = 0;
q->last_packet_time = now;
}
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
struct cake_host *srchost = &b->hosts[flow->srchost];
struct cake_host *dsthost = &b->hosts[flow->dsthost];
u16 host_load = 1;
if (!flow->set) {
list_add_tail(&flow->flowchain, &b->new_flows);
} else {
b->decaying_flow_count--;
list_move_tail(&flow->flowchain, &b->new_flows);
}
flow->set = CAKE_SET_SPARSE;
b->sparse_flow_count++;
if (cake_dsrc(q->flow_mode))
host_load = max(host_load, srchost->srchost_bulk_flow_count);
if (cake_ddst(q->flow_mode))
host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
flow->deficit = (b->flow_quantum *
quantum_div[host_load]) >> 16;
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
struct cake_host *srchost = &b->hosts[flow->srchost];
struct cake_host *dsthost = &b->hosts[flow->dsthost];
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
*/
flow->set = CAKE_SET_BULK;
b->sparse_flow_count--;
b->bulk_flow_count++;
if (cake_dsrc(q->flow_mode))
srchost->srchost_bulk_flow_count++;
if (cake_ddst(q->flow_mode))
dsthost->dsthost_bulk_flow_count++;
}
if (q->buffer_used > q->buffer_max_used)
q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) {
u32 dropped = 0;
while (q->buffer_used > q->buffer_limit) {
dropped++;
cake_drop(sch, to_free);
}
b->drop_overlimit += dropped;
}
return NET_XMIT_SUCCESS;
}
static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[q->cur_tin];
struct cake_flow *flow = &b->flows[q->cur_flow];
struct sk_buff *skb = NULL;
u32 len;
if (flow->head) {
skb = dequeue_head(flow);
len = qdisc_pkt_len(skb);
b->backlogs[q->cur_flow] -= len;
b->tin_backlog -= len;
sch->qstats.backlog -= len;
q->buffer_used -= skb->truesize;
sch->q.qlen--;
if (q->overflow_timeout)
cake_heapify(q, b->overflow_idx[q->cur_flow]);
}
return skb;
}
/* Discard leftover packets from a tin no longer in use. */
static void cake_clear_tin(struct Qdisc *sch, u16 tin)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
q->cur_tin = tin;
for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
while (!!(skb = cake_dequeue_one(sch)))
kfree_skb(skb);
}
static struct sk_buff *cake_dequeue(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[q->cur_tin];
struct cake_host *srchost, *dsthost;
ktime_t now = ktime_get();
struct cake_flow *flow;
struct list_head *head;
bool first_flow = true;
struct sk_buff *skb;
u16 host_load;
u64 delay;
u32 len;
begin:
if (!sch->q.qlen)
return NULL;
/* global hard shaper */
if (ktime_after(q->time_next_packet, now) &&
ktime_after(q->failsafe_next_packet, now)) {
u64 next = min(ktime_to_ns(q->time_next_packet),
ktime_to_ns(q->failsafe_next_packet));
sch->qstats.overlimits++;
qdisc_watchdog_schedule_ns(&q->watchdog, next);
return NULL;
}
/* Choose a class to work on. */
if (!q->rate_ns) {
/* In unlimited mode, can't rely on shaper timings, just balance
* with DRR
*/
bool wrapped = false, empty = true;
while (b->tin_deficit < 0 ||
!(b->sparse_flow_count + b->bulk_flow_count)) {
if (b->tin_deficit <= 0)
b->tin_deficit += b->tin_quantum;
if (b->sparse_flow_count + b->bulk_flow_count)
empty = false;
q->cur_tin++;
b++;
if (q->cur_tin >= q->tin_cnt) {
q->cur_tin = 0;
b = q->tins;
if (wrapped) {
/* It's possible for q->qlen to be
* nonzero when we actually have no
* packets anywhere.
*/
if (empty)
return NULL;
} else {
wrapped = true;
}
}
}
} else {
/* In shaped mode, choose:
* - Highest-priority tin with queue and meeting schedule, or
* - The earliest-scheduled tin with queue.
*/
ktime_t best_time = KTIME_MAX;
int tin, best_tin = 0;
for (tin = 0; tin < q->tin_cnt; tin++) {
b = q->tins + tin;
if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
ktime_t time_to_pkt =
ktime_sub(b->time_next_packet, now);
if (ktime_to_ns(time_to_pkt) <= 0 ||
ktime_compare(time_to_pkt,
best_time) <= 0) {
best_time = time_to_pkt;
best_tin = tin;
}
}
}
q->cur_tin = best_tin;
b = q->tins + best_tin;
/* No point in going further if no packets to deliver. */
if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
return NULL;
}
retry:
/* service this class */
head = &b->decaying_flows;
if (!first_flow || list_empty(head)) {
head = &b->new_flows;
if (list_empty(head)) {
head = &b->old_flows;
if (unlikely(list_empty(head))) {
head = &b->decaying_flows;
if (unlikely(list_empty(head)))
goto begin;
}
}
}
flow = list_first_entry(head, struct cake_flow, flowchain);
q->cur_flow = flow - b->flows;
first_flow = false;
/* triple isolation (modified DRR++) */
srchost = &b->hosts[flow->srchost];
dsthost = &b->hosts[flow->dsthost];
host_load = 1;
/* flow isolation (DRR++) */
if (flow->deficit <= 0) {
/* Keep all flows with deficits out of the sparse and decaying
* rotations. No non-empty flow can go into the decaying
* rotation, so they can't get deficits
*/
if (flow->set == CAKE_SET_SPARSE) {
if (flow->head) {
b->sparse_flow_count--;
b->bulk_flow_count++;
if (cake_dsrc(q->flow_mode))
srchost->srchost_bulk_flow_count++;
if (cake_ddst(q->flow_mode))
dsthost->dsthost_bulk_flow_count++;
flow->set = CAKE_SET_BULK;
} else {
/* we've moved it to the bulk rotation for
* correct deficit accounting but we still want
* to count it as a sparse flow, not a bulk one.
*/
flow->set = CAKE_SET_SPARSE_WAIT;
}
}
if (cake_dsrc(q->flow_mode))
host_load = max(host_load, srchost->srchost_bulk_flow_count);
if (cake_ddst(q->flow_mode))
host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
WARN_ON(host_load > CAKE_QUEUES);
/* The get_random_u16() is a way to apply dithering to avoid
* accumulating roundoff errors
*/
flow->deficit += (b->flow_quantum * quantum_div[host_load] +
get_random_u16()) >> 16;
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
}
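/* Illustrative refill arithmetic: quantum_div[h] ~= 65535 / h, so with
* flow_quantum == 1514 and host_load == 3 the increment is roughly
* (1514 * 21845) >> 16 ~= 504 bytes, i.e. the flow quantum divided
* fairly across the host's three bulk flows, with the random dither
* recovering the truncated fraction on average.
*/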
/* Retrieve a packet via the AQM */
while (1) {
skb = cake_dequeue_one(sch);
if (!skb) {
/* this queue was actually empty */
if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
b->unresponsive_flow_count--;
if (flow->cvars.p_drop || flow->cvars.count ||
ktime_before(now, flow->cvars.drop_next)) {
/* keep in the flowchain until the state has
* decayed to rest
*/
list_move_tail(&flow->flowchain,
&b->decaying_flows);
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
if (cake_dsrc(q->flow_mode))
srchost->srchost_bulk_flow_count--;
if (cake_ddst(q->flow_mode))
dsthost->dsthost_bulk_flow_count--;
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
flow->set == CAKE_SET_SPARSE_WAIT) {
b->sparse_flow_count--;
b->decaying_flow_count++;
}
flow->set = CAKE_SET_DECAYING;
} else {
/* remove empty queue from the flowchain */
list_del_init(&flow->flowchain);
if (flow->set == CAKE_SET_SPARSE ||
flow->set == CAKE_SET_SPARSE_WAIT)
b->sparse_flow_count--;
else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
if (cake_dsrc(q->flow_mode))
srchost->srchost_bulk_flow_count--;
if (cake_ddst(q->flow_mode))
dsthost->dsthost_bulk_flow_count--;
} else
b->decaying_flow_count--;
flow->set = CAKE_SET_NONE;
}
goto begin;
}
/* Last packet in queue may be marked, shouldn't be dropped */
if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
(b->bulk_flow_count *
!!(q->rate_flags &
CAKE_FLAG_INGRESS))) ||
!flow->head)
break;
/* drop this packet, get another one */
if (q->rate_flags & CAKE_FLAG_INGRESS) {
len = cake_advance_shaper(q, b, skb,
now, true);
flow->deficit -= len;
b->tin_deficit -= len;
}
flow->dropped++;
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
kfree_skb(skb);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
b->tin_ecn_mark += !!flow->cvars.ecn_marked;
qdisc_bstats_update(sch, skb);
/* collect delay stats */
delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
b->peak_delay = cake_ewma(b->peak_delay, delay,
delay > b->peak_delay ? 2 : 8);
b->base_delay = cake_ewma(b->base_delay, delay,
delay < b->base_delay ? 2 : 8);
len = cake_advance_shaper(q, b, skb, now, false);
flow->deficit -= len;
b->tin_deficit -= len;
if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
u64 next = min(ktime_to_ns(q->time_next_packet),
ktime_to_ns(q->failsafe_next_packet));
qdisc_watchdog_schedule_ns(&q->watchdog, next);
} else if (!sch->q.qlen) {
int i;
for (i = 0; i < q->tin_cnt; i++) {
if (q->tins[i].decaying_flow_count) {
ktime_t next =
ktime_add_ns(now,
q->tins[i].cparams.target);
qdisc_watchdog_schedule_ns(&q->watchdog,
ktime_to_ns(next));
break;
}
}
}
if (q->overflow_timeout)
q->overflow_timeout--;
return skb;
}
static void cake_reset(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 c;
if (!q->tins)
return;
for (c = 0; c < CAKE_MAX_TINS; c++)
cake_clear_tin(sch, c);
}
static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
[TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
[TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
[TCA_CAKE_ATM] = { .type = NLA_U32 },
[TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
[TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
[TCA_CAKE_RTT] = { .type = NLA_U32 },
[TCA_CAKE_TARGET] = { .type = NLA_U32 },
[TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
[TCA_CAKE_MEMORY] = { .type = NLA_U32 },
[TCA_CAKE_NAT] = { .type = NLA_U32 },
[TCA_CAKE_RAW] = { .type = NLA_U32 },
[TCA_CAKE_WASH] = { .type = NLA_U32 },
[TCA_CAKE_MPU] = { .type = NLA_U32 },
[TCA_CAKE_INGRESS] = { .type = NLA_U32 },
[TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
[TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 },
[TCA_CAKE_FWMARK] = { .type = NLA_U32 },
};
static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
u64 target_ns, u64 rtt_est_ns)
{
/* convert byte-rate into time-per-byte
* so it will always unwedge in reasonable time.
*/
static const u64 MIN_RATE = 64;
u32 byte_target = mtu;
u64 byte_target_ns;
u8 rate_shft = 0;
u64 rate_ns = 0;
b->flow_quantum = 1514;
if (rate) {
b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
rate_shft = 34;
rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
while (!!(rate_ns >> 34)) {
rate_ns >>= 1;
rate_shft--;
}
} /* else unlimited, ie. zero delay */
b->tin_rate_bps = rate;
b->tin_rate_ns = rate_ns;
b->tin_rate_shft = rate_shft;
byte_target_ns = (byte_target * rate_ns) >> rate_shft;
b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
b->cparams.interval = max(rtt_est_ns +
b->cparams.target - target_ns,
b->cparams.target * 2);
b->cparams.mtu_time = byte_target_ns;
b->cparams.p_inc = 1 << 24; /* 1/256 */
b->cparams.p_dec = 1 << 20; /* 1/4096 */
}
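/* Worked example (illustrative; rate in bytes/s as supplied by tc): at
* 100Mbit/s, rate == 12500000, so rate_ns starts as 80 << 34 and the
* loop above leaves rate_ns == 80 << 27 with rate_shft == 27, i.e.
* exactly 80ns per byte. A 1514 byte MTU then gives byte_target_ns ~=
* 121us, so cparams.target stays at the default 5ms and
* cparams.interval at 100ms.
*/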
static int cake_config_besteffort(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[0];
u32 mtu = psched_mtu(qdisc_dev(sch));
u64 rate = q->rate_bps;
q->tin_cnt = 1;
q->tin_index = besteffort;
q->tin_order = normal_order;
cake_set_rate(b, rate, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
b->tin_quantum = 65535;
return 0;
}
static int cake_config_precedence(struct Qdisc *sch)
{
/* convert high-level (user visible) parameters into internal format */
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
u64 rate = q->rate_bps;
u32 quantum = 256;
u32 i;
q->tin_cnt = 8;
q->tin_index = precedence;
q->tin_order = normal_order;
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[i];
cake_set_rate(b, rate, mtu, us_to_ns(q->target),
us_to_ns(q->interval));
b->tin_quantum = max_t(u16, 1U, quantum);
/* calculate next class's parameters */
rate *= 7;
rate >>= 3;
quantum *= 7;
quantum >>= 3;
}
return 0;
}
/* List of known Diffserv codepoints:
*
* Default Forwarding (DF/CS0) - Best Effort
* Max Throughput (TOS2)
* Min Delay (TOS4)
* LLT "La" (TOS5)
* Assured Forwarding 1 (AF1x) - x3
* Assured Forwarding 2 (AF2x) - x3
* Assured Forwarding 3 (AF3x) - x3
* Assured Forwarding 4 (AF4x) - x3
* Precedence Class 1 (CS1)
* Precedence Class 2 (CS2)
* Precedence Class 3 (CS3)
* Precedence Class 4 (CS4)
* Precedence Class 5 (CS5)
* Precedence Class 6 (CS6)
* Precedence Class 7 (CS7)
* Voice Admit (VA)
* Expedited Forwarding (EF)
* Lower Effort (LE)
*
* Total 26 codepoints.
*/
/* List of traffic classes in RFC 4594, updated by RFC 8622:
* (roughly descending order of contended priority)
* (roughly ascending order of uncontended throughput)
*
* Network Control (CS6,CS7) - routing traffic
* Telephony (EF,VA) - aka. VoIP streams
* Signalling (CS5) - VoIP setup
* Multimedia Conferencing (AF4x) - aka. video calls
* Realtime Interactive (CS4) - eg. games
* Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
* Broadcast Video (CS3)
* Low-Latency Data (AF2x,TOS4) - eg. database
* Ops, Admin, Management (CS2) - eg. ssh
* Standard Service (DF & unrecognised codepoints)
* High-Throughput Data (AF1x,TOS2) - eg. web traffic
* Low-Priority Data (LE,CS1) - eg. BitTorrent
*
* Total 12 traffic classes.
*/
static int cake_config_diffserv8(struct Qdisc *sch)
{
/* Pruned list of traffic classes for typical applications:
*
* Network Control (CS6, CS7)
* Minimum Latency (EF, VA, CS5, CS4)
* Interactive Shell (CS2)
* Low Latency Transactions (AF2x, TOS4)
* Video Streaming (AF4x, AF3x, CS3)
* Bog Standard (DF etc.)
* High Throughput (AF1x, TOS2, CS1)
* Background Traffic (LE)
*
* Total 8 traffic classes.
*/
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
u64 rate = q->rate_bps;
u32 quantum = 256;
u32 i;
q->tin_cnt = 8;
/* codepoint to class mapping */
q->tin_index = diffserv8;
q->tin_order = normal_order;
/* class characteristics */
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[i];
cake_set_rate(b, rate, mtu, us_to_ns(q->target),
us_to_ns(q->interval));
b->tin_quantum = max_t(u16, 1U, quantum);
/* calculate next class's parameters */
rate *= 7;
rate >>= 3;
quantum *= 7;
quantum >>= 3;
}
return 0;
}
static int cake_config_diffserv4(struct Qdisc *sch)
{
/* Further pruned list of traffic classes for four-class system:
*
* Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
* Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2)
* Best Effort (DF, AF1x, TOS2, and those not specified)
* Background Traffic (LE, CS1)
*
* Total 4 traffic classes.
*/
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
u64 rate = q->rate_bps;
u32 quantum = 1024;
q->tin_cnt = 4;
/* codepoint to class mapping */
q->tin_index = diffserv4;
q->tin_order = bulk_order;
/* class characteristics */
cake_set_rate(&q->tins[0], rate, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
cake_set_rate(&q->tins[1], rate >> 4, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
cake_set_rate(&q->tins[2], rate >> 1, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
cake_set_rate(&q->tins[3], rate >> 2, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
/* bandwidth-sharing weights */
q->tins[0].tin_quantum = quantum;
q->tins[1].tin_quantum = quantum >> 4;
q->tins[2].tin_quantum = quantum >> 1;
q->tins[3].tin_quantum = quantum >> 2;
return 0;
}
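/* Illustrative numbers at, say, 100Mbit/s: the per-tin rate thresholds
* come out as 100% for Best Effort, 6.25% for Bulk, 50% for Video and
* 25% for Voice, and the tin_quantum weights mirror the same 16:1:8:4
* ratio for DRR balancing when the shaper is unlimited.
*/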
static int cake_config_diffserv3(struct Qdisc *sch)
{
/* Simplified Diffserv structure with 3 tins.
* Latency Sensitive (CS7, CS6, EF, VA, TOS4)
* Best Effort
* Low Priority (LE, CS1)
*/
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
u64 rate = q->rate_bps;
u32 quantum = 1024;
q->tin_cnt = 3;
/* codepoint to class mapping */
q->tin_index = diffserv3;
q->tin_order = bulk_order;
/* class characteristics */
cake_set_rate(&q->tins[0], rate, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
cake_set_rate(&q->tins[1], rate >> 4, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
cake_set_rate(&q->tins[2], rate >> 2, mtu,
us_to_ns(q->target), us_to_ns(q->interval));
/* bandwidth-sharing weights */
q->tins[0].tin_quantum = quantum;
q->tins[1].tin_quantum = quantum >> 4;
q->tins[2].tin_quantum = quantum >> 2;
return 0;
}
static void cake_reconfigure(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
int c, ft;
switch (q->tin_mode) {
case CAKE_DIFFSERV_BESTEFFORT:
ft = cake_config_besteffort(sch);
break;
case CAKE_DIFFSERV_PRECEDENCE:
ft = cake_config_precedence(sch);
break;
case CAKE_DIFFSERV_DIFFSERV8:
ft = cake_config_diffserv8(sch);
break;
case CAKE_DIFFSERV_DIFFSERV4:
ft = cake_config_diffserv4(sch);
break;
case CAKE_DIFFSERV_DIFFSERV3:
default:
ft = cake_config_diffserv3(sch);
break;
}
for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
cake_clear_tin(sch, c);
q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
}
q->rate_ns = q->tins[ft].tin_rate_ns;
q->rate_shft = q->tins[ft].tin_rate_shft;
if (q->buffer_config_limit) {
q->buffer_limit = q->buffer_config_limit;
} else if (q->rate_bps) {
u64 t = q->rate_bps * q->interval;
do_div(t, USEC_PER_SEC / 4);
q->buffer_limit = max_t(u32, t, 4U << 20);
} else {
q->buffer_limit = ~0;
}
sch->flags &= ~TCQ_F_CAN_BYPASS;
q->buffer_limit = min(q->buffer_limit,
max(sch->limit * psched_mtu(qdisc_dev(sch)),
q->buffer_config_limit));
}
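/* Illustrative sizing of the automatic limit above: with rate_bps ==
* 12500000 (100Mbit/s) and the default 100ms interval, t ==
* 12500000 * 100000 / 250000 == 5000000, i.e. four intervals' worth of
* data (5MB here), and never less than the 4MB floor.
*/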
static int cake_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CAKE_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
extack);
if (err < 0)
return err;
if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
q->flow_mode |= CAKE_FLOW_NAT_FLAG *
!!nla_get_u32(tb[TCA_CAKE_NAT]);
#else
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
"No conntrack support in kernel");
return -EOPNOTSUPP;
#endif
}
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
if (tb[TCA_CAKE_DIFFSERV_MODE])
q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
if (tb[TCA_CAKE_WASH]) {
if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
q->rate_flags |= CAKE_FLAG_WASH;
else
q->rate_flags &= ~CAKE_FLAG_WASH;
}
if (tb[TCA_CAKE_FLOW_MODE])
q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK));
if (tb[TCA_CAKE_ATM])
q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
if (tb[TCA_CAKE_OVERHEAD]) {
q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
q->rate_flags |= CAKE_FLAG_OVERHEAD;
q->max_netlen = 0;
q->max_adjlen = 0;
q->min_netlen = ~0;
q->min_adjlen = ~0;
}
if (tb[TCA_CAKE_RAW]) {
q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
q->max_netlen = 0;
q->max_adjlen = 0;
q->min_netlen = ~0;
q->min_adjlen = ~0;
}
if (tb[TCA_CAKE_MPU])
q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
if (!q->interval)
q->interval = 1;
}
if (tb[TCA_CAKE_TARGET]) {
q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
if (!q->target)
q->target = 1;
}
if (tb[TCA_CAKE_AUTORATE]) {
if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
else
q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
}
if (tb[TCA_CAKE_INGRESS]) {
if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
q->rate_flags |= CAKE_FLAG_INGRESS;
else
q->rate_flags &= ~CAKE_FLAG_INGRESS;
}
if (tb[TCA_CAKE_ACK_FILTER])
q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
if (tb[TCA_CAKE_MEMORY])
q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
if (tb[TCA_CAKE_SPLIT_GSO]) {
if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
else
q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
}
if (tb[TCA_CAKE_FWMARK]) {
q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
}
if (q->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
sch_tree_unlock(sch);
}
return 0;
}
static void cake_destroy(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
qdisc_watchdog_cancel(&q->watchdog);
tcf_block_put(q->block);
kvfree(q->tins);
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct cake_sched_data *q = qdisc_priv(sch);
int i, j, err;
sch->limit = 10240;
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q->flow_mode = CAKE_FLOW_TRIPLE;
q->rate_bps = 0; /* unlimited by default */
q->interval = 100000; /* 100ms default */
q->target = 5000; /* 5ms: codel RFC argues
* for 5 to 10% of interval
*/
q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
q->cur_tin = 0;
q->cur_flow = 0;
qdisc_watchdog_init(&q->watchdog, sch);
if (opt) {
err = cake_change(sch, opt, extack);
if (err)
return err;
}
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
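/* quantum_div[] is a table of scaled reciprocals (65535 / i); later per-host
 * fairness math can then replace a runtime division by the host load with a
 * multiply and shift (a common reciprocal-table idiom).
 */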
quantum_div[0] = ~0;
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
return -ENOMEM;
for (i = 0; i < CAKE_MAX_TINS; i++) {
struct cake_tin_data *b = q->tins + i;
INIT_LIST_HEAD(&b->new_flows);
INIT_LIST_HEAD(&b->old_flows);
INIT_LIST_HEAD(&b->decaying_flows);
b->sparse_flow_count = 0;
b->bulk_flow_count = 0;
b->decaying_flow_count = 0;
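/* Seed the overflow heap with an identity mapping: heap slot
 * k = j * CAKE_MAX_TINS + i starts out referring to (tin i, flow j), and
 * overflow_idx[] records the reverse mapping so the heap code can locate a
 * queue's slot in O(1).
 */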
for (j = 0; j < CAKE_QUEUES; j++) {
struct cake_flow *flow = b->flows + j;
u32 k = j * CAKE_MAX_TINS + i;
INIT_LIST_HEAD(&flow->flowchain);
cobalt_vars_init(&flow->cvars);
q->overflow_heap[k].t = i;
q->overflow_heap[k].b = j;
b->overflow_idx[j] = k;
}
}
cake_reconfigure(sch);
q->avg_peak_bandwidth = q->rate_bps;
q->min_netlen = ~0;
q->min_adjlen = ~0;
return 0;
}
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *opts;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!opts)
goto nla_put_failure;
if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
TCA_CAKE_PAD))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
q->flow_mode & CAKE_FLOW_MASK))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_INGRESS,
!!(q->rate_flags & CAKE_FLAG_INGRESS)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_NAT,
!!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_WASH,
!!(q->rate_flags & CAKE_FLAG_WASH)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
goto nla_put_failure;
if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
return -1;
}
static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *tstats, *ts;
int i;
if (!stats)
return -1;
#define PUT_STAT_U32(attr, data) do { \
if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
goto nla_put_failure; \
} while (0)
#define PUT_STAT_U64(attr, data) do { \
if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
data, TCA_CAKE_STATS_PAD)) \
goto nla_put_failure; \
} while (0)
PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
#undef PUT_STAT_U32
#undef PUT_STAT_U64
tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
if (!tstats)
goto nla_put_failure;
#define PUT_TSTAT_U32(attr, data) do { \
if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
goto nla_put_failure; \
} while (0)
#define PUT_TSTAT_U64(attr, data) do { \
if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
data, TCA_CAKE_TIN_STATS_PAD)) \
goto nla_put_failure; \
} while (0)
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[q->tin_order[i]];
ts = nla_nest_start_noflag(d->skb, i + 1);
if (!ts)
goto nla_put_failure;
PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
PUT_TSTAT_U32(TARGET_US,
ktime_to_us(ns_to_ktime(b->cparams.target)));
PUT_TSTAT_U32(INTERVAL_US,
ktime_to_us(ns_to_ktime(b->cparams.interval)));
PUT_TSTAT_U32(SENT_PACKETS, b->packets);
PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
PUT_TSTAT_U32(PEAK_DELAY_US,
ktime_to_us(ns_to_ktime(b->peak_delay)));
PUT_TSTAT_U32(AVG_DELAY_US,
ktime_to_us(ns_to_ktime(b->avge_delay)));
PUT_TSTAT_U32(BASE_DELAY_US,
ktime_to_us(ns_to_ktime(b->base_delay)));
PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
b->decaying_flow_count);
PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
nla_nest_end(d->skb, ts);
}
#undef PUT_TSTAT_U32
#undef PUT_TSTAT_U64
nla_nest_end(d->skb, tstats);
return nla_nest_end(d->skb, stats);
nla_put_failure:
nla_nest_cancel(d->skb, stats);
return -1;
}
static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
}
static unsigned long cake_find(struct Qdisc *sch, u32 classid)
{
return 0;
}
static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
return 0;
}
static void cake_unbind(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
struct netlink_ext_ack *extack)
{
struct cake_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
return q->block;
}
static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
tcm->tcm_handle |= TC_H_MIN(cl);
return 0;
}
static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
{
struct cake_sched_data *q = qdisc_priv(sch);
const struct cake_flow *flow = NULL;
struct gnet_stats_queue qs = { 0 };
struct nlattr *stats;
u32 idx = cl - 1;
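/* Class handles are 1-based: idx / CAKE_QUEUES selects the tin (via
 * tin_order) and idx % CAKE_QUEUES the flow queue within that tin.
 */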
if (idx < CAKE_QUEUES * q->tin_cnt) {
const struct cake_tin_data *b =
&q->tins[q->tin_order[idx / CAKE_QUEUES]];
const struct sk_buff *skb;
flow = &b->flows[idx % CAKE_QUEUES];
if (flow->head) {
sch_tree_lock(sch);
skb = flow->head;
while (skb) {
qs.qlen++;
skb = skb->next;
}
sch_tree_unlock(sch);
}
qs.backlog = b->backlogs[idx % CAKE_QUEUES];
qs.drops = flow->dropped;
}
if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
if (flow) {
ktime_t now = ktime_get();
stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
if (!stats)
return -1;
#define PUT_STAT_U32(attr, data) do { \
if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
goto nla_put_failure; \
} while (0)
#define PUT_STAT_S32(attr, data) do { \
if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
goto nla_put_failure; \
} while (0)
PUT_STAT_S32(DEFICIT, flow->deficit);
PUT_STAT_U32(DROPPING, flow->cvars.dropping);
PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
if (flow->cvars.p_drop) {
PUT_STAT_S32(BLUE_TIMER_US,
ktime_to_us(
ktime_sub(now,
flow->cvars.blue_timer)));
}
if (flow->cvars.dropping) {
PUT_STAT_S32(DROP_NEXT_US,
ktime_to_us(
ktime_sub(now,
flow->cvars.drop_next)));
}
if (nla_nest_end(d->skb, stats) < 0)
return -1;
}
return 0;
nla_put_failure:
nla_nest_cancel(d->skb, stats);
return -1;
}
static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct cake_sched_data *q = qdisc_priv(sch);
unsigned int i, j;
if (arg->stop)
return;
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[q->tin_order[i]];
for (j = 0; j < CAKE_QUEUES; j++) {
if (list_empty(&b->flows[j].flowchain)) {
arg->count++;
continue;
}
if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1,
arg))
break;
}
}
}
static const struct Qdisc_class_ops cake_class_ops = {
.leaf = cake_leaf,
.find = cake_find,
.tcf_block = cake_tcf_block,
.bind_tcf = cake_bind,
.unbind_tcf = cake_unbind,
.dump = cake_dump_class,
.dump_stats = cake_dump_class_stats,
.walk = cake_walk,
};
static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
.cl_ops = &cake_class_ops,
.id = "cake",
.priv_size = sizeof(struct cake_sched_data),
.enqueue = cake_enqueue,
.dequeue = cake_dequeue,
.peek = qdisc_peek_dequeued,
.init = cake_init,
.reset = cake_reset,
.destroy = cake_destroy,
.change = cake_change,
.dump = cake_dump,
.dump_stats = cake_dump_stats,
.owner = THIS_MODULE,
};
static int __init cake_module_init(void)
{
return register_qdisc(&cake_qdisc_ops);
}
static void __exit cake_module_exit(void)
{
unregister_qdisc(&cake_qdisc_ops);
}
module_init(cake_module_init)
module_exit(cake_module_exit)
MODULE_AUTHOR("Jonathan Morton");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("The CAKE shaper.");
| linux-master | net/sched/sch_cake.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/act_meta_prio.c IFE skb->priority metadata module
*
* copyright Jamal Hadi Salim (2015)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
static int skbprio_check(struct sk_buff *skb, struct tcf_meta_info *e)
{
return ife_check_meta_u32(skb->priority, e);
}
static int skbprio_encode(struct sk_buff *skb, void *skbdata,
struct tcf_meta_info *e)
{
u32 ifeprio = skb->priority; /* avoid having to cast skb->priority */
return ife_encode_meta_u32(ifeprio, skbdata, e);
}
static int skbprio_decode(struct sk_buff *skb, void *data, u16 len)
{
u32 ifeprio = *(u32 *)data;
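/* The encoded value travels in network byte order; convert it before
 * storing into the host-order skb->priority field.
 */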
skb->priority = ntohl(ifeprio);
return 0;
}
static struct tcf_meta_ops ife_prio_ops = {
.metaid = IFE_META_PRIO,
.metatype = NLA_U32,
.name = "skbprio",
.synopsis = "skb prio metadata",
.check_presence = skbprio_check,
.encode = skbprio_encode,
.decode = skbprio_decode,
.get = ife_get_meta_u32,
.alloc = ife_alloc_meta_u32,
.owner = THIS_MODULE,
};
static int __init ifeprio_init_module(void)
{
return register_ife_op(&ife_prio_ops);
}
static void __exit ifeprio_cleanup_module(void)
{
unregister_ife_op(&ife_prio_ops);
}
module_init(ifeprio_init_module);
module_exit(ifeprio_cleanup_module);
MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE skb prio metadata action");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IFE_META("skbprio");
| linux-master | net/sched/act_meta_skbprio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, Intel Corporation.
*
* Author: Alexander Duyck <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
struct multiq_sched_data {
u16 bands;
u16 max_bands;
u16 curband;
struct tcf_proto __rcu *filter_list;
struct tcf_block *block;
struct Qdisc **queues;
};
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
struct multiq_sched_data *q = qdisc_priv(sch);
u32 band;
struct tcf_result res;
struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
fallthrough;
case TC_ACT_SHOT:
return NULL;
}
#endif
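/* The band always comes from the skb's TX queue mapping (attached filters
 * can steal or drop the packet, but don't pick the band); out-of-range
 * mappings fall back to band 0.
 */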
band = skb_get_queue_mapping(skb);
if (band >= q->bands)
return q->queues[0];
return q->queues[band];
}
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct Qdisc *qdisc;
int ret;
qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) {
if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
__qdisc_drop(skb, to_free);
return ret;
}
#endif
ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
return ret;
}
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
struct multiq_sched_data *q = qdisc_priv(sch);
struct Qdisc *qdisc;
struct sk_buff *skb;
int band;
for (band = 0; band < q->bands; band++) {
/* cycle through bands to ensure fairness */
q->curband++;
if (q->curband >= q->bands)
q->curband = 0;
/* Check that target subqueue is available before
* pulling an skb to avoid head-of-line blocking.
*/
if (!netif_xmit_stopped(
netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
}
}
return NULL;
}
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned int curband = q->curband;
struct Qdisc *qdisc;
struct sk_buff *skb;
int band;
for (band = 0; band < q->bands; band++) {
/* cycle through bands to ensure fairness */
curband++;
if (curband >= q->bands)
curband = 0;
/* Check that target subqueue is available before
* pulling an skb to avoid head-of-line blocking.
*/
if (!netif_xmit_stopped(
netdev_get_tx_queue(qdisc_dev(sch), curband))) {
qdisc = q->queues[curband];
skb = qdisc->ops->peek(qdisc);
if (skb)
return skb;
}
}
return NULL;
}
static void
multiq_reset(struct Qdisc *sch)
{
u16 band;
struct multiq_sched_data *q = qdisc_priv(sch);
for (band = 0; band < q->bands; band++)
qdisc_reset(q->queues[band]);
q->curband = 0;
}
static void
multiq_destroy(struct Qdisc *sch)
{
int band;
struct multiq_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
for (band = 0; band < q->bands; band++)
qdisc_put(q->queues[band]);
kfree(q->queues);
}
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
struct tc_multiq_qopt *qopt;
struct Qdisc **removed;
int i, n_removed = 0;
if (!netif_is_multiqueue(qdisc_dev(sch)))
return -EOPNOTSUPP;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
GFP_KERNEL);
if (!removed)
return -ENOMEM;
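/* Unlink shrunk-away children under the qdisc tree lock, but defer the
 * qdisc_put() calls until after the lock is released, since destroying a
 * qdisc is not safe under this spinlock.
 */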
sch_tree_lock(sch);
q->bands = qopt->bands;
for (i = q->bands; i < q->max_bands; i++) {
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
qdisc_purge_queue(child);
removed[n_removed++] = child;
}
}
sch_tree_unlock(sch);
for (i = 0; i < n_removed; i++)
qdisc_put(removed[i]);
kfree(removed);
for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old;
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle,
i + 1), extack);
if (child) {
sch_tree_lock(sch);
old = q->queues[i];
q->queues[i] = child;
if (child != &noop_qdisc)
qdisc_hash_add(child, true);
if (old != &noop_qdisc)
qdisc_purge_queue(old);
sch_tree_unlock(sch);
qdisc_put(old);
}
}
}
return 0;
}
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
int i, err;
q->queues = NULL;
if (!opt)
return -EINVAL;
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
q->max_bands = qdisc_dev(sch)->num_tx_queues;
q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
if (!q->queues)
return -ENOBUFS;
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
return multiq_tune(sch, opt, extack);
}
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_multiq_qopt opt;
opt.bands = q->bands;
opt.max_bands = q->max_bands;
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
if (new == NULL)
new = &noop_qdisc;
*old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
}
static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
return q->queues[band];
}
static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = TC_H_MIN(classid);
if (band - 1 >= q->bands)
return 0;
return band;
}
static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
return multiq_find(sch, classid);
}
static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl);
tcm->tcm_info = q->queues[cl - 1]->handle;
return 0;
}
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
{
struct multiq_sched_data *q = qdisc_priv(sch);
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
return 0;
}
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct multiq_sched_data *q = qdisc_priv(sch);
int band;
if (arg->stop)
return;
for (band = 0; band < q->bands; band++) {
if (!tc_qdisc_stats_dump(sch, band + 1, arg))
break;
}
}
static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
return q->block;
}
static const struct Qdisc_class_ops multiq_class_ops = {
.graft = multiq_graft,
.leaf = multiq_leaf,
.find = multiq_find,
.walk = multiq_walk,
.tcf_block = multiq_tcf_block,
.bind_tcf = multiq_bind,
.unbind_tcf = multiq_unbind,
.dump = multiq_dump_class,
.dump_stats = multiq_dump_class_stats,
};
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.next = NULL,
.cl_ops = &multiq_class_ops,
.id = "multiq",
.priv_size = sizeof(struct multiq_sched_data),
.enqueue = multiq_enqueue,
.dequeue = multiq_dequeue,
.peek = multiq_peek,
.init = multiq_init,
.reset = multiq_reset,
.destroy = multiq_destroy,
.change = multiq_tune,
.dump = multiq_dump,
.owner = THIS_MODULE,
};
static int __init multiq_module_init(void)
{
return register_qdisc(&multiq_qdisc_ops);
}
static void __exit multiq_module_exit(void)
{
unregister_qdisc(&multiq_qdisc_ops);
}
module_init(multiq_module_init)
module_exit(multiq_module_exit)
MODULE_LICENSE("GPL");
| linux-master | net/sched/sch_multiq.c |
// SPDX-License-Identifier: GPL-2.0
/* net/sched/sch_taprio.c Time Aware Priority Scheduler
*
* Authors: Vinicius Costa Gomes <[email protected]>
*
*/
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>
#define TAPRIO_STAT_NOT_SET (~0ULL)
#include "sch_mqprio_lib.h"
static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;
#define TAPRIO_ALL_GATES_OPEN -1
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
/* Durations between this GCL entry and the GCL entry where the
* respective traffic class gate closes
*/
u64 gate_duration[TC_MAX_QUEUE];
atomic_t budget[TC_MAX_QUEUE];
/* The qdisc makes some effort so that no packet leaves
* after this time
*/
ktime_t gate_close_time[TC_MAX_QUEUE];
struct list_head list;
/* Used to calculate when to advance the schedule */
ktime_t end_time;
ktime_t next_txtime;
int index;
u32 gate_mask;
u32 interval;
u8 command;
};
struct sched_gate_list {
/* Longest non-zero contiguous gate durations per traffic class,
* or 0 if a traffic class gate never opens during the schedule.
*/
u64 max_open_gate_duration[TC_MAX_QUEUE];
u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
struct rcu_head rcu;
struct list_head entries;
size_t num_entries;
ktime_t cycle_end_time;
s64 cycle_time;
s64 cycle_time_extension;
s64 base_time;
};
struct taprio_sched {
struct Qdisc **qdiscs;
struct Qdisc *root;
u32 flags;
enum tk_offsets tk_offset;
int clockid;
bool offloaded;
bool detected_mqprio;
bool broken_mqprio;
atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
* speeds it's sub-nanoseconds per byte
*/
/* Protects the update side of the RCU protected current_entry */
spinlock_t current_entry_lock;
struct sched_entry __rcu *current_entry;
struct sched_gate_list __rcu *oper_sched;
struct sched_gate_list __rcu *admin_sched;
struct hrtimer advance_timer;
struct list_head taprio_list;
int cur_txq[TC_MAX_QUEUE];
u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
u32 txtime_delay;
};
struct __tc_taprio_qopt_offload {
refcount_t users;
struct tc_taprio_qopt_offload offload;
};
static void taprio_calculate_gate_durations(struct taprio_sched *q,
struct sched_gate_list *sched)
{
struct net_device *dev = qdisc_dev(q->root);
int num_tc = netdev_get_num_tc(dev);
struct sched_entry *entry, *cur;
int tc;
list_for_each_entry(entry, &sched->entries, list) {
u32 gates_still_open = entry->gate_mask;
/* For each traffic class, calculate each open gate duration,
* starting at this schedule entry and ending at the schedule
* entry containing a gate close event for that TC.
*/
cur = entry;
do {
if (!gates_still_open)
break;
for (tc = 0; tc < num_tc; tc++) {
if (!(gates_still_open & BIT(tc)))
continue;
if (cur->gate_mask & BIT(tc))
entry->gate_duration[tc] += cur->interval;
else
gates_still_open &= ~BIT(tc);
}
cur = list_next_entry_circular(cur, &sched->entries, list);
} while (cur != entry);
/* Keep track of the maximum gate duration for each traffic
* class, taking care to not confuse a traffic class which is
* temporarily closed with one that is always closed.
*/
for (tc = 0; tc < num_tc; tc++)
if (entry->gate_duration[tc] &&
sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
}
}
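/* Illustrative example (made-up schedule): with three entries
 * { E0: TC0 open, 30us; E1: TC0 open, 20us; E2: TC0 closed, 50us },
 * the loop above yields gate_duration[0] = 50us for E0 (30 + 20, up to the
 * close event in E2), 20us for E1 and 0 for E2, so
 * max_open_gate_duration[0] = 50us.
 */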
static bool taprio_entry_allows_tx(ktime_t skb_end_time,
struct sched_entry *entry, int tc)
{
return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
if (!sched)
return KTIME_MAX;
return ns_to_ktime(sched->base_time);
}
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);
switch (tk_offset) {
case TK_OFFS_MAX:
return mono;
default:
return ktime_mono_to_any(mono, tk_offset);
}
}
static ktime_t taprio_get_time(const struct taprio_sched *q)
{
return taprio_mono_to_any(q, ktime_get());
}
static void taprio_free_sched_cb(struct rcu_head *head)
{
struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
struct sched_entry *entry, *n;
list_for_each_entry_safe(entry, n, &sched->entries, list) {
list_del(&entry->list);
kfree(entry);
}
kfree(sched);
}
static void switch_schedules(struct taprio_sched *q,
struct sched_gate_list **admin,
struct sched_gate_list **oper)
{
rcu_assign_pointer(q->oper_sched, *admin);
rcu_assign_pointer(q->admin_sched, NULL);
if (*oper)
call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
*oper = *admin;
*admin = NULL;
}
/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
ktime_t time_since_sched_start;
s32 time_elapsed;
time_since_sched_start = ktime_sub(time, sched->base_time);
div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
return time_elapsed;
}
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
struct sched_gate_list *admin,
struct sched_entry *entry,
ktime_t intv_start)
{
s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
ktime_t intv_end, cycle_ext_end, cycle_end;
cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
intv_end = ktime_add_ns(intv_start, entry->interval);
cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
if (ktime_before(intv_end, cycle_end))
return intv_end;
else if (admin && admin != sched &&
ktime_after(admin->base_time, cycle_end) &&
ktime_before(admin->base_time, cycle_ext_end))
return admin->base_time;
else
return cycle_end;
}
static int length_to_duration(struct taprio_sched *q, int len)
{
return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
static int duration_to_length(struct taprio_sched *q, u64 duration)
{
return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}
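/* Worked example (assuming a 1 Gb/s link, i.e. picos_per_byte = 8000):
 * length_to_duration() maps a 1500 byte frame to
 * 1500 * 8000 / PSEC_PER_NSEC = 12000 ns, and duration_to_length() is the
 * inverse, mapping 12000 ns back to 1500 bytes.
 */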
/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
* q->max_sdu[] requested by the user and the max_sdu dynamically determined by
* the maximum open gate durations at the given link speed.
*/
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
struct sched_gate_list *sched,
struct qdisc_size_table *stab)
{
struct net_device *dev = qdisc_dev(q->root);
int num_tc = netdev_get_num_tc(dev);
u32 max_sdu_from_user;
u32 max_sdu_dynamic;
u32 max_sdu;
int tc;
for (tc = 0; tc < num_tc; tc++) {
max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;
/* TC gate never closes => keep the queueMaxSDU
* selected by the user
*/
if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
max_sdu_dynamic = U32_MAX;
} else {
u32 max_frm_len;
max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
/* Compensate for L1 overhead from size table,
* but don't let the frame size go negative
*/
if (stab) {
max_frm_len -= stab->szopts.overhead;
max_frm_len = max_t(int, max_frm_len,
dev->hard_header_len + 1);
}
max_sdu_dynamic = max_frm_len - dev->hard_header_len;
if (max_sdu_dynamic > dev->max_mtu)
max_sdu_dynamic = U32_MAX;
}
max_sdu = min(max_sdu_dynamic, max_sdu_from_user);
if (max_sdu != U32_MAX) {
sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
sched->max_sdu[tc] = max_sdu;
} else {
sched->max_frm_len[tc] = U32_MAX; /* never oversized */
sched->max_sdu[tc] = 0;
}
}
}
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
struct Qdisc *sch,
struct sched_gate_list *sched,
struct sched_gate_list *admin,
ktime_t time,
ktime_t *interval_start,
ktime_t *interval_end,
bool validate_interval)
{
ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
struct sched_entry *entry = NULL, *entry_found = NULL;
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
bool entry_available = false;
s32 cycle_elapsed;
int tc, n;
tc = netdev_get_prio_tc_map(dev, skb->priority);
packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
*interval_start = 0;
*interval_end = 0;
if (!sched)
return NULL;
cycle = sched->cycle_time;
cycle_elapsed = get_cycle_time_elapsed(sched, time);
curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
cycle_end = ktime_add_ns(curr_intv_end, cycle);
list_for_each_entry(entry, &sched->entries, list) {
curr_intv_start = curr_intv_end;
curr_intv_end = get_interval_end_time(sched, admin, entry,
curr_intv_start);
if (ktime_after(curr_intv_start, cycle_end))
break;
if (!(entry->gate_mask & BIT(tc)) ||
packet_transmit_time > entry->interval)
continue;
txtime = entry->next_txtime;
if (ktime_before(txtime, time) || validate_interval) {
transmit_end_time = ktime_add_ns(time, packet_transmit_time);
if ((ktime_before(curr_intv_start, time) &&
ktime_before(transmit_end_time, curr_intv_end)) ||
(ktime_after(curr_intv_start, time) && !validate_interval)) {
entry_found = entry;
*interval_start = curr_intv_start;
*interval_end = curr_intv_end;
break;
} else if (!entry_available && !validate_interval) {
/* Here, we are just trying to find out the
* first available interval in the next cycle.
*/
entry_available = true;
entry_found = entry;
*interval_start = ktime_add_ns(curr_intv_start, cycle);
*interval_end = ktime_add_ns(curr_intv_end, cycle);
}
} else if (ktime_before(txtime, earliest_txtime) &&
!entry_available) {
earliest_txtime = txtime;
entry_found = entry;
n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
*interval_start = ktime_add(curr_intv_start, n * cycle);
*interval_end = ktime_add(curr_intv_end, n * cycle);
}
}
return entry_found;
}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct sched_gate_list *sched, *admin;
ktime_t interval_start, interval_end;
struct sched_entry *entry;
rcu_read_lock();
sched = rcu_dereference(q->oper_sched);
admin = rcu_dereference(q->admin_sched);
entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
&interval_start, &interval_end, true);
rcu_read_unlock();
return entry;
}
static bool taprio_flags_valid(u32 flags)
{
/* Make sure no other flag bits are set. */
if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
return false;
/* txtime-assist and full offload are mutually exclusive */
if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
(flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
return false;
return true;
}
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
unsigned int offset = skb_network_offset(skb);
const struct ipv6hdr *ipv6h;
const struct iphdr *iph;
struct ipv6hdr _ipv6h;
ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
if (!ipv6h)
return 0;
if (ipv6h->version == 4) {
iph = (struct iphdr *)ipv6h;
offset += iph->ihl * 4;
/* special-case 6in4 tunnelling, as that is a common way to get
* v6 connectivity in the home
*/
if (iph->protocol == IPPROTO_IPV6) {
ipv6h = skb_header_pointer(skb, offset,
sizeof(_ipv6h), &_ipv6h);
if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
return 0;
} else if (iph->protocol != IPPROTO_TCP) {
return 0;
}
} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
return 0;
}
return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}
/* There are a few scenarios where we will have to modify the txtime from
* what is read from next_txtime in sched_entry. They are:
* 1. If txtime is in the past,
* a. The gate for the traffic class is currently open and packet can be
* transmitted before it closes, schedule the packet right away.
* b. If the gate corresponding to the traffic class is going to open later
* in the cycle, set the txtime of packet to the interval start.
* 2. If txtime is in the future, there are packets corresponding to the
* current traffic class waiting to be transmitted. So, the following
* possibilities exist:
* a. We can transmit the packet before the window containing the txtime
* closes.
* b. The window might close before the transmission can be completed
* successfully. So, schedule the packet in the next open window.
*/
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
struct taprio_sched *q = qdisc_priv(sch);
struct sched_gate_list *sched, *admin;
ktime_t minimum_time, now, txtime;
int len, packet_transmit_time;
struct sched_entry *entry;
bool sched_changed;
now = taprio_get_time(q);
minimum_time = ktime_add_ns(now, q->txtime_delay);
tcp_tstamp = get_tcp_tstamp(q, skb);
minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);
rcu_read_lock();
admin = rcu_dereference(q->admin_sched);
sched = rcu_dereference(q->oper_sched);
if (admin && ktime_after(minimum_time, admin->base_time))
switch_schedules(q, &admin, &sched);
/* Until the schedule starts, all the queues are open */
if (!sched || ktime_before(minimum_time, sched->base_time)) {
txtime = minimum_time;
goto done;
}
len = qdisc_pkt_len(skb);
packet_transmit_time = length_to_duration(q, len);
do {
sched_changed = false;
entry = find_entry_to_transmit(skb, sch, sched, admin,
minimum_time,
&interval_start, &interval_end,
false);
if (!entry) {
txtime = 0;
goto done;
}
txtime = entry->next_txtime;
txtime = max_t(ktime_t, txtime, minimum_time);
txtime = max_t(ktime_t, txtime, interval_start);
if (admin && admin != sched &&
ktime_after(txtime, admin->base_time)) {
sched = admin;
sched_changed = true;
continue;
}
transmit_end_time = ktime_add(txtime, packet_transmit_time);
minimum_time = transmit_end_time;
/* Update the txtime of the current entry to the next time its
 * interval starts.
 */
if (ktime_after(transmit_end_time, interval_end))
entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
} while (sched_changed || ktime_after(transmit_end_time, interval_end));
entry->next_txtime = transmit_end_time;
done:
rcu_read_unlock();
return txtime;
}
/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
struct sk_buff *skb)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct sched_gate_list *sched;
int prio = skb->priority;
bool exceeds = false;
u8 tc;
tc = netdev_get_prio_tc_map(dev, prio);
rcu_read_lock();
sched = rcu_dereference(q->oper_sched);
if (sched && skb->len > sched->max_frm_len[tc])
exceeds = true;
rcu_read_unlock();
return exceeds;
}
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child, struct sk_buff **to_free)
{
struct taprio_sched *q = qdisc_priv(sch);
/* sk_flags are only safe to use on full sockets. */
if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
if (!is_valid_interval(skb, sch))
return qdisc_drop(skb, sch, to_free);
} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
skb->tstamp = get_packet_txtime(skb, sch);
if (!skb->tstamp)
return qdisc_drop(skb, sch, to_free);
}
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return qdisc_enqueue(skb, child, to_free);
}
static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child,
struct sk_buff **to_free)
{
unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
netdev_features_t features = netif_skb_features(skb);
struct sk_buff *segs, *nskb;
int ret;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
return qdisc_drop(skb, sch, to_free);
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
slen += segs->len;
/* FIXME: we should be segmenting to a smaller size
* rather than dropping these
*/
if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
ret = qdisc_drop(segs, sch, to_free);
else
ret = taprio_enqueue_one(segs, sch, child, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
} else {
numsegs++;
}
}
if (numsegs > 1)
qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
consume_skb(skb);
return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
/* Will not be called in the full offload case, since the TX queues are
* attached to the Qdisc created using qdisc_create_dflt()
*/
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct taprio_sched *q = qdisc_priv(sch);
struct Qdisc *child;
int queue;
queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue];
if (unlikely(!child))
return qdisc_drop(skb, sch, to_free);
if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
/* Large packets might not be transmitted when the transmission
* duration exceeds any configured interval. Therefore, segment
* the skb into smaller chunks. Drivers with full offload are
* expected to handle this in hardware.
*/
if (skb_is_gso(skb))
return taprio_enqueue_segmented(skb, sch, child,
to_free);
return qdisc_drop(skb, sch, to_free);
}
return taprio_enqueue_one(skb, sch, child, to_free);
}
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
return NULL;
}
static void taprio_set_budgets(struct taprio_sched *q,
struct sched_gate_list *sched,
struct sched_entry *entry)
{
struct net_device *dev = qdisc_dev(q->root);
int num_tc = netdev_get_num_tc(dev);
int tc, budget;
for (tc = 0; tc < num_tc; tc++) {
/* Traffic classes which never close have infinite budget */
if (entry->gate_duration[tc] == sched->cycle_time)
budget = INT_MAX;
else
budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
atomic64_read(&q->picos_per_byte));
atomic_set(&entry->budget[tc], budget);
}
}
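/* Illustrative budget: a 100 us open gate on a 1 Gb/s link
 * (picos_per_byte = 8000) allows 100000 ns * PSEC_PER_NSEC / 8000 =
 * 12500 bytes to be sent before the gate closes.
 */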
/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
int tc_consumed, int num_tc)
{
int tc, budget, new_budget = 0;
for (tc = 0; tc < num_tc; tc++) {
budget = atomic_read(&entry->budget[tc]);
/* Don't consume from infinite budget */
if (budget == INT_MAX) {
if (tc == tc_consumed)
new_budget = budget;
continue;
}
if (tc == tc_consumed)
new_budget = atomic_sub_return(len, &entry->budget[tc]);
else
atomic_sub(len, &entry->budget[tc]);
}
return new_budget;
}
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
struct sched_entry *entry,
u32 gate_mask)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *child = q->qdiscs[txq];
int num_tc = netdev_get_num_tc(dev);
struct sk_buff *skb;
ktime_t guard;
int prio;
int len;
u8 tc;
if (unlikely(!child))
return NULL;
if (TXTIME_ASSIST_IS_ENABLED(q->flags))
goto skip_peek_checks;
skb = child->ops->peek(child);
if (!skb)
return NULL;
prio = skb->priority;
tc = netdev_get_prio_tc_map(dev, prio);
if (!(gate_mask & BIT(tc)))
return NULL;
len = qdisc_pkt_len(skb);
guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));
/* In the case that there's no gate entry, there's no
* guard band ...
*/
if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
!taprio_entry_allows_tx(guard, entry, tc))
return NULL;
/* ... and no budget. */
if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
taprio_update_budgets(entry, len, tc, num_tc) < 0)
return NULL;
skip_peek_checks:
skb = child->ops->dequeue(child);
if (unlikely(!skb))
return NULL;
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
int offset = dev->tc_to_txq[tc].offset;
int count = dev->tc_to_txq[tc].count;
(*txq)++;
if (*txq == offset + count)
*txq = offset;
}
/* Prioritize higher traffic classes, and select among TXQs belonging to the
* same TC using round robin
*/
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
struct sched_entry *entry,
u32 gate_mask)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int num_tc = netdev_get_num_tc(dev);
struct sk_buff *skb;
int tc;
for (tc = num_tc - 1; tc >= 0; tc--) {
int first_txq = q->cur_txq[tc];
if (!(gate_mask & BIT(tc)))
continue;
do {
skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
entry, gate_mask);
taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
if (q->cur_txq[tc] >= dev->num_tx_queues)
q->cur_txq[tc] = first_txq;
if (skb)
return skb;
} while (q->cur_txq[tc] != first_txq);
}
return NULL;
}
/* Broken way: prioritize smaller TXQ indices, consulting the traffic
 * class only to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
struct sched_entry *entry,
u32 gate_mask)
{
struct net_device *dev = qdisc_dev(sch);
struct sk_buff *skb;
int i;
for (i = 0; i < dev->num_tx_queues; i++) {
skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
if (skb)
return skb;
}
return NULL;
}
/* Will not be called in the full offload case, since the TX queues are
* attached to the Qdisc created using qdisc_create_dflt()
*/
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct sk_buff *skb = NULL;
struct sched_entry *entry;
u32 gate_mask;
rcu_read_lock();
entry = rcu_dereference(q->current_entry);
/* If there's no entry, the schedule hasn't started yet, so force
 * all gates open, in accordance with IEEE 802.1Qbv-2015 Section
 * 8.6.9.4.5 "AdminGateStates"
 */
gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
if (!gate_mask)
goto done;
if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
!static_branch_likely(&taprio_have_working_mqprio)) {
/* Single NIC kind which is broken */
skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
} else if (static_branch_likely(&taprio_have_working_mqprio) &&
!static_branch_unlikely(&taprio_have_broken_mqprio)) {
/* Single NIC kind which prioritizes properly */
skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
} else {
/* Mixed NIC kinds present in system, need dynamic testing */
if (q->broken_mqprio)
skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
else
skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
}
done:
rcu_read_unlock();
return skb;
}
static bool should_restart_cycle(const struct sched_gate_list *oper,
const struct sched_entry *entry)
{
if (list_is_last(&entry->list, &oper->entries))
return true;
if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
return true;
return false;
}
static bool should_change_schedules(const struct sched_gate_list *admin,
const struct sched_gate_list *oper,
ktime_t end_time)
{
ktime_t next_base_time, extension_time;
if (!admin)
return false;
next_base_time = sched_base_time(admin);
/* This is the simple case, the end_time would fall after
* the next schedule base_time.
*/
if (ktime_compare(next_base_time, end_time) <= 0)
return true;
/* This is the cycle_time_extension case, if the end_time
* plus the amount that can be extended would fall after the
* next schedule base_time, we can extend the current schedule
* for that amount.
*/
extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);
/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
* how precisely the extension should be made. So after
* conformance testing, this logic may change.
*/
if (ktime_compare(next_base_time, extension_time) <= 0)
return true;
return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
struct taprio_sched *q = container_of(timer, struct taprio_sched,
advance_timer);
struct net_device *dev = qdisc_dev(q->root);
struct sched_gate_list *oper, *admin;
int num_tc = netdev_get_num_tc(dev);
struct sched_entry *entry, *next;
struct Qdisc *sch = q->root;
ktime_t end_time;
int tc;
spin_lock(&q->current_entry_lock);
entry = rcu_dereference_protected(q->current_entry,
lockdep_is_held(&q->current_entry_lock));
oper = rcu_dereference_protected(q->oper_sched,
lockdep_is_held(&q->current_entry_lock));
admin = rcu_dereference_protected(q->admin_sched,
lockdep_is_held(&q->current_entry_lock));
if (!oper)
switch_schedules(q, &admin, &oper);
/* This can happen in two cases: 1. this is the very first run
 * of this function (i.e. we weren't running any schedule
 * previously); 2. the previous schedule just ended. The first
 * entry of every schedule is pre-calculated during schedule
 * initialization.
 */
if (unlikely(!entry || entry->end_time == oper->base_time)) {
next = list_first_entry(&oper->entries, struct sched_entry,
list);
end_time = next->end_time;
goto first_run;
}
if (should_restart_cycle(oper, entry)) {
next = list_first_entry(&oper->entries, struct sched_entry,
list);
oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
oper->cycle_time);
} else {
next = list_next_entry(entry, list);
}
end_time = ktime_add_ns(entry->end_time, next->interval);
end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
for (tc = 0; tc < num_tc; tc++) {
if (next->gate_duration[tc] == oper->cycle_time)
next->gate_close_time[tc] = KTIME_MAX;
else
next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
next->gate_duration[tc]);
}
if (should_change_schedules(admin, oper, end_time)) {
/* Set things so the next time this runs, the new
* schedule runs.
*/
end_time = sched_base_time(admin);
switch_schedules(q, &admin, &oper);
}
next->end_time = end_time;
taprio_set_budgets(q, oper, next);
first_run:
rcu_assign_pointer(q->current_entry, next);
spin_unlock(&q->current_entry_lock);
hrtimer_set_expires(&q->advance_timer, end_time);
rcu_read_lock();
__netif_schedule(sch);
rcu_read_unlock();
return HRTIMER_RESTART;
}
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
[TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
[TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
[TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
};
static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
[TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
[TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
[TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
TC_FP_PREEMPTIBLE),
};
static struct netlink_range_validation_signed taprio_cycle_time_range = {
.min = 0,
.max = INT_MAX,
};
static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_PRIOMAP] = {
.len = sizeof(struct tc_mqprio_qopt)
},
[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
[TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
[TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
struct sched_entry *entry,
struct netlink_ext_ack *extack)
{
int min_duration = length_to_duration(q, ETH_ZLEN);
u32 interval = 0;
if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
entry->command = nla_get_u8(
tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
entry->gate_mask = nla_get_u32(
tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
interval = nla_get_u32(
tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
/* The interval should allow at least the minimum ethernet
* frame to go out.
*/
if (interval < min_duration) {
NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
return -EINVAL;
}
entry->interval = interval;
return 0;
}
static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
struct sched_entry *entry, int index,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
int err;
err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
entry_policy, NULL);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Could not parse nested entry");
return -EINVAL;
}
entry->index = index;
return fill_sched_entry(q, tb, entry, extack);
}
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
{
struct nlattr *n;
int err, rem;
int i = 0;
if (!list)
return -EINVAL;
nla_for_each_nested(n, list, rem) {
struct sched_entry *entry;
if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
continue;
}
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
NL_SET_ERR_MSG(extack, "Not enough memory for entry");
return -ENOMEM;
}
err = parse_sched_entry(q, n, entry, i, extack);
if (err < 0) {
kfree(entry);
return err;
}
list_add_tail(&entry->list, &sched->entries);
i++;
}
sched->num_entries = i;
return i;
}
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
struct sched_gate_list *new,
struct netlink_ext_ack *extack)
{
int err = 0;
if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
return -ENOTSUPP;
}
if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
new, extack);
if (err < 0)
return err;
if (!new->cycle_time) {
struct sched_entry *entry;
ktime_t cycle = 0;
list_for_each_entry(entry, &new->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
if (!cycle) {
NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
return -EINVAL;
}
if (cycle < 0 || cycle > INT_MAX) {
NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
return -EINVAL;
}
new->cycle_time = cycle;
}
taprio_calculate_gate_durations(q, new);
return 0;
}
static int taprio_parse_mqprio_opt(struct net_device *dev,
struct tc_mqprio_qopt *qopt,
struct netlink_ext_ack *extack,
u32 taprio_flags)
{
bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
if (!qopt && !dev->num_tc) {
NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
return -EINVAL;
}
/* If num_tc is already set, it means that the user already
* configured the mqprio part
*/
if (dev->num_tc)
return 0;
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
return -EINVAL;
}
/* For some reason, in txtime-assist mode, we allow TXQ ranges for
* different TCs to overlap, and just validate the TXQ ranges.
*/
return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
extack);
}
static int taprio_get_start_time(struct Qdisc *sch,
struct sched_gate_list *sched,
ktime_t *start)
{
struct taprio_sched *q = qdisc_priv(sch);
ktime_t now, base, cycle;
s64 n;
base = sched_base_time(sched);
now = taprio_get_time(q);
if (ktime_after(base, now)) {
*start = base;
return 0;
}
cycle = sched->cycle_time;
/* The qdisc is expected to have at least one sched_entry. Moreover,
* any entry must have 'interval' > 0. Thus if the cycle time is zero,
* something went really wrong. In that case, we should warn about this
* inconsistent state and return error.
*/
if (WARN_ON(!cycle))
return -EFAULT;
/* Schedule the start time for the beginning of the next
* cycle.
*/
n = div64_s64(ktime_sub_ns(now, base), cycle);
*start = ktime_add_ns(base, (n + 1) * cycle);
return 0;
}
static void setup_first_end_time(struct taprio_sched *q,
struct sched_gate_list *sched, ktime_t base)
{
struct net_device *dev = qdisc_dev(q->root);
int num_tc = netdev_get_num_tc(dev);
struct sched_entry *first;
ktime_t cycle;
int tc;
first = list_first_entry(&sched->entries,
struct sched_entry, list);
cycle = sched->cycle_time;
/* FIXME: find a better place to do this */
sched->cycle_end_time = ktime_add_ns(base, cycle);
first->end_time = ktime_add_ns(base, first->interval);
taprio_set_budgets(q, sched, first);
for (tc = 0; tc < num_tc; tc++) {
if (first->gate_duration[tc] == sched->cycle_time)
first->gate_close_time[tc] = KTIME_MAX;
else
first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
}
rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
ktime_t start, struct sched_gate_list *new)
{
struct taprio_sched *q = qdisc_priv(sch);
ktime_t expires;
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
return;
expires = hrtimer_get_expires(&q->advance_timer);
if (expires == 0)
expires = KTIME_MAX;
/* If the new schedule starts before the next expiration, we
* reprogram it to the earliest one, so we change the admin
* schedule to the operational one at the right time.
*/
start = min_t(ktime_t, start, expires);
hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
static void taprio_set_picos_per_byte(struct net_device *dev,
struct taprio_sched *q)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
int picos_per_byte;
int err;
err = __ethtool_get_link_ksettings(dev, &ecmd);
if (err < 0)
goto skip;
if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
speed = ecmd.base.speed;
skip:
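/* speed is in Mb/s, so (USEC_PER_SEC * 8) / speed = 8e6 / speed is the
 * per-byte transmission time in picoseconds: e.g. SPEED_1000 gives
 * 8000 ps/byte.
 */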
picos_per_byte = (USEC_PER_SEC * 8) / speed;
atomic64_set(&q->picos_per_byte, picos_per_byte);
netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
dev->name, (long long)atomic64_read(&q->picos_per_byte),
ecmd.base.speed);
}
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct sched_gate_list *oper, *admin;
struct qdisc_size_table *stab;
struct taprio_sched *q;
ASSERT_RTNL();
if (event != NETDEV_UP && event != NETDEV_CHANGE)
return NOTIFY_DONE;
list_for_each_entry(q, &taprio_list, taprio_list) {
if (dev != qdisc_dev(q->root))
continue;
taprio_set_picos_per_byte(dev, q);
stab = rtnl_dereference(q->root->stab);
oper = rtnl_dereference(q->oper_sched);
if (oper)
taprio_update_queue_max_sdu(q, oper, stab);
admin = rtnl_dereference(q->admin_sched);
if (admin)
taprio_update_queue_max_sdu(q, admin, stab);
break;
}
return NOTIFY_DONE;
}
static void setup_txtime(struct taprio_sched *q,
struct sched_gate_list *sched, ktime_t base)
{
struct sched_entry *entry;
u64 interval = 0;
list_for_each_entry(entry, &sched->entries, list) {
entry->next_txtime = ktime_add_ns(base, interval);
interval += entry->interval;
}
}
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
struct __tc_taprio_qopt_offload *__offload;
__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
GFP_KERNEL);
if (!__offload)
return NULL;
refcount_set(&__offload->users, 1);
return &__offload->offload;
}
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
*offload)
{
struct __tc_taprio_qopt_offload *__offload;
__offload = container_of(offload, struct __tc_taprio_qopt_offload,
offload);
refcount_inc(&__offload->users);
return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
struct __tc_taprio_qopt_offload *__offload;
__offload = container_of(offload, struct __tc_taprio_qopt_offload,
offload);
if (!refcount_dec_and_test(&__offload->users))
return;
kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
* schedules valid in relation to their base times, so when calling dump() the
* user looks at the right schedules.
* When using full offload, the admin configuration is promoted to oper at the
* base_time in the PHC time domain. But because the system time is not
* necessarily in sync with that, we can't just trigger a hrtimer to call
* switch_schedules at the right hardware time.
* At the moment we call this by hand right away from taprio, but in the future
* it will be useful to create a mechanism for drivers to notify taprio of the
* offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
* This is left as TODO.
*/
static void taprio_offload_config_changed(struct taprio_sched *q)
{
struct sched_gate_list *oper, *admin;
oper = rtnl_dereference(q->oper_sched);
admin = rtnl_dereference(q->admin_sched);
switch_schedules(q, &admin, &oper);
}
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
u32 i, queue_mask = 0;
for (i = 0; i < dev->num_tc; i++) {
u32 offset, count;
if (!(tc_mask & BIT(i)))
continue;
offset = dev->tc_to_txq[i].offset;
count = dev->tc_to_txq[i].count;
queue_mask |= GENMASK(offset + count - 1, offset);
}
return queue_mask;
}
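/* Illustrative worked example (editor's note, not part of the original
 * source): with num_tc = 2, tc_to_txq[0] = {offset 0, count 2} and
 * tc_to_txq[1] = {offset 2, count 2}, a tc_mask of BIT(1) selects only
 * TC 1 and queue_mask becomes GENMASK(3, 2) = 0xc, i.e. TX queues 2
 * and 3.
 */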
static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
struct tc_taprio_qopt_offload *offload,
const struct tc_taprio_caps *caps)
{
struct sched_entry *entry;
int i = 0;
offload->base_time = sched->base_time;
offload->cycle_time = sched->cycle_time;
offload->cycle_time_extension = sched->cycle_time_extension;
list_for_each_entry(entry, &sched->entries, list) {
struct tc_taprio_sched_entry *e = &offload->entries[i];
e->command = entry->command;
e->interval = entry->interval;
if (caps->gate_mask_per_txq)
e->gate_mask = tc_map_to_queue_mask(dev,
entry->gate_mask);
else
e->gate_mask = entry->gate_mask;
i++;
}
offload->num_entries = i;
}
static void taprio_detect_broken_mqprio(struct taprio_sched *q)
{
struct net_device *dev = qdisc_dev(q->root);
struct tc_taprio_caps caps;
qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
&caps, sizeof(caps));
q->broken_mqprio = caps.broken_mqprio;
if (q->broken_mqprio)
static_branch_inc(&taprio_have_broken_mqprio);
else
static_branch_inc(&taprio_have_working_mqprio);
q->detected_mqprio = true;
}
static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
{
if (!q->detected_mqprio)
return;
if (q->broken_mqprio)
static_branch_dec(&taprio_have_broken_mqprio);
else
static_branch_dec(&taprio_have_working_mqprio);
}
static int taprio_enable_offload(struct net_device *dev,
struct taprio_sched *q,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct tc_taprio_qopt_offload *offload;
struct tc_taprio_caps caps;
int tc, err = 0;
if (!ops->ndo_setup_tc) {
NL_SET_ERR_MSG(extack,
"Device does not support taprio offload");
return -EOPNOTSUPP;
}
qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
&caps, sizeof(caps));
if (!caps.supports_queue_max_sdu) {
for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
if (q->max_sdu[tc]) {
NL_SET_ERR_MSG_MOD(extack,
"Device does not handle queueMaxSDU");
return -EOPNOTSUPP;
}
}
}
offload = taprio_offload_alloc(sched->num_entries);
if (!offload) {
NL_SET_ERR_MSG(extack,
"Not enough memory for enabling offload mode");
return -ENOMEM;
}
offload->cmd = TAPRIO_CMD_REPLACE;
offload->extack = extack;
mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
offload->mqprio.extack = extack;
taprio_sched_to_offload(dev, sched, offload, &caps);
mqprio_fp_to_offload(q->fp, &offload->mqprio);
for (tc = 0; tc < TC_MAX_QUEUE; tc++)
offload->max_sdu[tc] = q->max_sdu[tc];
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
NL_SET_ERR_MSG_WEAK(extack,
"Device failed to setup taprio offload");
goto done;
}
q->offloaded = true;
done:
/* The offload structure may linger around via a reference taken by the
	 * device driver, so clear the netlink extack pointer so that the
	 * driver isn't tempted to dereference data which is no longer valid.
*/
offload->extack = NULL;
offload->mqprio.extack = NULL;
taprio_offload_free(offload);
return err;
}
static int taprio_disable_offload(struct net_device *dev,
struct taprio_sched *q,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct tc_taprio_qopt_offload *offload;
int err;
if (!q->offloaded)
return 0;
offload = taprio_offload_alloc(0);
if (!offload) {
NL_SET_ERR_MSG(extack,
"Not enough memory to disable offload mode");
return -ENOMEM;
}
offload->cmd = TAPRIO_CMD_DESTROY;
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
NL_SET_ERR_MSG(extack,
"Device failed to disable offload");
goto out;
}
q->offloaded = false;
out:
taprio_offload_free(offload);
return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
* PHC. For that reason, specifying a clockid through netlink is incorrect.
* For txtime-assist, it is implicitly assumed that the device's PHC is kept
* in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule, and is hence mandatory.
*/
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int err = -EINVAL;
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_ts_info info = {
.cmd = ETHTOOL_GET_TS_INFO,
.phc_index = -1,
};
if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
NL_SET_ERR_MSG(extack,
"The 'clockid' cannot be specified for full offload");
goto out;
}
if (ops && ops->get_ts_info)
err = ops->get_ts_info(dev, &info);
if (err || info.phc_index < 0) {
NL_SET_ERR_MSG(extack,
"Device does not have a PTP clock");
err = -ENOTSUPP;
goto out;
}
} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
enum tk_offsets tk_offset;
/* We only support static clockids and we don't allow
* for it to be modified after the first init.
*/
if (clockid < 0 ||
(q->clockid != -1 && q->clockid != clockid)) {
NL_SET_ERR_MSG(extack,
"Changing the 'clockid' of a running schedule is not supported");
err = -ENOTSUPP;
goto out;
}
switch (clockid) {
case CLOCK_REALTIME:
tk_offset = TK_OFFS_REAL;
break;
case CLOCK_MONOTONIC:
tk_offset = TK_OFFS_MAX;
break;
case CLOCK_BOOTTIME:
tk_offset = TK_OFFS_BOOT;
break;
case CLOCK_TAI:
tk_offset = TK_OFFS_TAI;
break;
default:
NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
err = -EINVAL;
goto out;
}
/* This pairs with READ_ONCE() in taprio_mono_to_any */
WRITE_ONCE(q->tk_offset, tk_offset);
q->clockid = clockid;
} else {
NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
goto out;
}
/* Everything went ok, return success. */
err = 0;
out:
return err;
}
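/* Usage sketch (editor's note, not part of the original source): in
 * software and txtime-assist modes the clockid must be given explicitly,
 * as in the canonical example from the tc-taprio(8) man page:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 *
 * With full offload (flags 0x2) the clockid attribute must be omitted,
 * since the schedule then runs in the device PHC's time domain.
 */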
static int taprio_parse_tc_entry(struct Qdisc *sch,
struct nlattr *opt,
u32 max_sdu[TC_QOPT_MAX_QUEUE],
u32 fp[TC_QOPT_MAX_QUEUE],
unsigned long *seen_tcs,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
struct net_device *dev = qdisc_dev(sch);
int err, tc;
u32 val;
err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
taprio_tc_policy, extack);
if (err < 0)
return err;
if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
return -EINVAL;
}
tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
if (tc >= TC_QOPT_MAX_QUEUE) {
NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
return -ERANGE;
}
if (*seen_tcs & BIT(tc)) {
NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
return -EINVAL;
}
*seen_tcs |= BIT(tc);
if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
if (val > dev->max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
return -ERANGE;
}
max_sdu[tc] = val;
}
if (tb[TCA_TAPRIO_TC_ENTRY_FP])
fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);
return 0;
}
static int taprio_parse_tc_entries(struct Qdisc *sch,
struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
u32 max_sdu[TC_QOPT_MAX_QUEUE];
bool have_preemption = false;
unsigned long seen_tcs = 0;
u32 fp[TC_QOPT_MAX_QUEUE];
struct nlattr *n;
int tc, rem;
int err = 0;
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
max_sdu[tc] = q->max_sdu[tc];
fp[tc] = q->fp[tc];
}
nla_for_each_nested(n, opt, rem) {
if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
continue;
err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
extack);
if (err)
return err;
}
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
q->max_sdu[tc] = max_sdu[tc];
q->fp[tc] = fp[tc];
if (fp[tc] != TC_FP_EXPRESS)
have_preemption = true;
}
if (have_preemption) {
if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
NL_SET_ERR_MSG(extack,
"Preemption only supported with full offload");
return -EOPNOTSUPP;
}
if (!ethtool_dev_mm_supported(dev)) {
NL_SET_ERR_MSG(extack,
"Device does not support preemption");
return -EOPNOTSUPP;
}
}
return err;
}
static int taprio_mqprio_cmp(const struct net_device *dev,
const struct tc_mqprio_qopt *mqprio)
{
int i;
if (!mqprio || mqprio->num_tc != dev->num_tc)
return -1;
for (i = 0; i < mqprio->num_tc; i++)
if (dev->tc_to_txq[i].count != mqprio->count[i] ||
dev->tc_to_txq[i].offset != mqprio->offset[i])
return -1;
for (i = 0; i <= TC_BITMASK; i++)
if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
return -1;
return 0;
}
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
* this order): (1) an omitted 'flags' argument is interpreted as
* zero; (2) the 'flags' of a "running" taprio instance cannot be
* changed.
*/
static int taprio_new_flags(const struct nlattr *attr, u32 old,
struct netlink_ext_ack *extack)
{
u32 new = 0;
if (attr)
new = nla_get_u32(attr);
if (old != TAPRIO_FLAGS_INVALID && old != new) {
NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
return -EOPNOTSUPP;
}
if (!taprio_flags_valid(new)) {
NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
return -EINVAL;
}
return new;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
struct sched_gate_list *oper, *admin, *new_admin;
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
unsigned long flags;
ktime_t start;
int i, err;
err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
taprio_policy, extack);
if (err < 0)
return err;
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
q->flags, extack);
if (err < 0)
return err;
q->flags = err;
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
return err;
err = taprio_parse_tc_entries(sch, opt, extack);
if (err)
return err;
new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
if (!new_admin) {
NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
return -ENOMEM;
}
INIT_LIST_HEAD(&new_admin->entries);
oper = rtnl_dereference(q->oper_sched);
admin = rtnl_dereference(q->admin_sched);
/* no changes - no new mqprio settings */
if (!taprio_mqprio_cmp(dev, mqprio))
mqprio = NULL;
if (mqprio && (oper || admin)) {
NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
err = -ENOTSUPP;
goto free_sched;
}
if (mqprio) {
err = netdev_set_num_tc(dev, mqprio->num_tc);
if (err)
goto free_sched;
for (i = 0; i < mqprio->num_tc; i++) {
netdev_set_tc_queue(dev, i,
mqprio->count[i],
mqprio->offset[i]);
q->cur_txq[i] = mqprio->offset[i];
}
/* Always use supplied priority mappings */
for (i = 0; i <= TC_BITMASK; i++)
netdev_set_prio_tc_map(dev, i,
mqprio->prio_tc_map[i]);
}
err = parse_taprio_schedule(q, tb, new_admin, extack);
if (err < 0)
goto free_sched;
if (new_admin->num_entries == 0) {
NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
err = -EINVAL;
goto free_sched;
}
err = taprio_parse_clockid(sch, tb, extack);
if (err < 0)
goto free_sched;
taprio_set_picos_per_byte(dev, q);
taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
err = taprio_enable_offload(dev, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
if (err)
goto free_sched;
/* Protects against enqueue()/dequeue() */
spin_lock_bh(qdisc_lock(sch));
if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
err = -EINVAL;
goto unlock;
}
q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
}
if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
!hrtimer_active(&q->advance_timer)) {
hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
q->advance_timer.function = advance_sched;
}
err = taprio_get_start_time(sch, new_admin, &start);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
goto unlock;
}
setup_txtime(q, new_admin, start);
if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
if (!oper) {
rcu_assign_pointer(q->oper_sched, new_admin);
err = 0;
new_admin = NULL;
goto unlock;
}
rcu_assign_pointer(q->admin_sched, new_admin);
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
} else {
setup_first_end_time(q, new_admin, start);
/* Protects against advance_sched() */
spin_lock_irqsave(&q->current_entry_lock, flags);
taprio_start_sched(sch, start, new_admin);
rcu_assign_pointer(q->admin_sched, new_admin);
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
spin_unlock_irqrestore(&q->current_entry_lock, flags);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
taprio_offload_config_changed(q);
}
new_admin = NULL;
err = 0;
if (!stab)
NL_SET_ERR_MSG_MOD(extack,
"Size table not specified, frame length estimations may be inaccurate");
unlock:
spin_unlock_bh(qdisc_lock(sch));
free_sched:
if (new_admin)
call_rcu(&new_admin->rcu, taprio_free_sched_cb);
return err;
}
static void taprio_reset(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int i;
hrtimer_cancel(&q->advance_timer);
if (q->qdiscs) {
for (i = 0; i < dev->num_tx_queues; i++)
if (q->qdiscs[i])
qdisc_reset(q->qdiscs[i]);
}
}
static void taprio_destroy(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct sched_gate_list *oper, *admin;
unsigned int i;
list_del(&q->taprio_list);
/* Note that taprio_reset() might not be called if an error
* happens in qdisc_create(), after taprio_init() has been called.
*/
hrtimer_cancel(&q->advance_timer);
qdisc_synchronize(sch);
taprio_disable_offload(dev, q, NULL);
if (q->qdiscs) {
for (i = 0; i < dev->num_tx_queues; i++)
qdisc_put(q->qdiscs[i]);
kfree(q->qdiscs);
}
q->qdiscs = NULL;
netdev_reset_tc(dev);
oper = rtnl_dereference(q->oper_sched);
admin = rtnl_dereference(q->admin_sched);
if (oper)
call_rcu(&oper->rcu, taprio_free_sched_cb);
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
taprio_cleanup_broken_mqprio(q);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int i, tc;
spin_lock_init(&q->current_entry_lock);
hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
q->advance_timer.function = advance_sched;
q->root = sch;
/* We only support static clockids. Use an invalid value as default
* and get the valid one on taprio_change().
*/
q->clockid = -1;
q->flags = TAPRIO_FLAGS_INVALID;
list_add(&q->taprio_list, &taprio_list);
if (sch->parent != TC_H_ROOT) {
NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
return -EOPNOTSUPP;
}
if (!netif_is_multiqueue(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
return -EOPNOTSUPP;
}
q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),
GFP_KERNEL);
if (!q->qdiscs)
return -ENOMEM;
if (!opt)
return -EINVAL;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
struct Qdisc *qdisc;
dev_queue = netdev_get_tx_queue(dev, i);
qdisc = qdisc_create_dflt(dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)),
extack);
if (!qdisc)
return -ENOMEM;
if (i < dev->real_num_tx_queues)
qdisc_hash_add(qdisc, false);
q->qdiscs[i] = qdisc;
}
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
q->fp[tc] = TC_FP_EXPRESS;
taprio_detect_broken_mqprio(q);
return taprio_change(sch, opt, extack);
}
static void taprio_attach(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx;
/* Attach underlying qdisc */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
struct Qdisc *old, *dev_queue_qdisc;
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
struct Qdisc *qdisc = q->qdiscs[ntx];
/* In offload mode, the root taprio qdisc is bypassed
* and the netdev TX queues see the children directly
*/
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
dev_queue_qdisc = qdisc;
} else {
/* In software mode, attach the root taprio qdisc
* to all netdev TX queues, so that dev_qdisc_enqueue()
* goes through taprio_enqueue().
*/
dev_queue_qdisc = sch;
}
old = dev_graft_qdisc(dev_queue, dev_queue_qdisc);
		/* The qdisc's refcount needs to be elevated once
		 * for each netdev TX queue it is grafted onto
*/
qdisc_refcount_inc(dev_queue_qdisc);
if (old)
qdisc_put(old);
}
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
unsigned long cl)
{
struct net_device *dev = qdisc_dev(sch);
unsigned long ntx = cl - 1;
if (ntx >= dev->num_tx_queues)
return NULL;
return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
struct Qdisc *new, struct Qdisc **old,
struct netlink_ext_ack *extack)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
if (!dev_queue)
return -EINVAL;
if (dev->flags & IFF_UP)
dev_deactivate(dev);
/* In offload mode, the child Qdisc is directly attached to the netdev
* TX queue, and thus, we need to keep its refcount elevated in order
* to counteract qdisc_graft()'s call to qdisc_put() once per TX queue.
* However, save the reference to the new qdisc in the private array in
* both software and offload cases, to have an up-to-date reference to
* our children.
*/
*old = q->qdiscs[cl - 1];
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old);
if (new)
qdisc_refcount_inc(new);
if (*old)
qdisc_put(*old);
}
q->qdiscs[cl - 1] = new;
if (new)
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
return 0;
}
static int dump_entry(struct sk_buff *msg,
const struct sched_entry *entry)
{
struct nlattr *item;
item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
if (!item)
return -ENOSPC;
if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
goto nla_put_failure;
if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
goto nla_put_failure;
if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
entry->gate_mask))
goto nla_put_failure;
if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
entry->interval))
goto nla_put_failure;
return nla_nest_end(msg, item);
nla_put_failure:
nla_nest_cancel(msg, item);
return -1;
}
static int dump_schedule(struct sk_buff *msg,
const struct sched_gate_list *root)
{
struct nlattr *entry_list;
struct sched_entry *entry;
if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
root->base_time, TCA_TAPRIO_PAD))
return -1;
if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
root->cycle_time, TCA_TAPRIO_PAD))
return -1;
if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
root->cycle_time_extension, TCA_TAPRIO_PAD))
return -1;
entry_list = nla_nest_start_noflag(msg,
TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
if (!entry_list)
goto error_nest;
list_for_each_entry(entry, &root->entries, list) {
if (dump_entry(msg, entry) < 0)
goto error_nest;
}
nla_nest_end(msg, entry_list);
return 0;
error_nest:
nla_nest_cancel(msg, entry_list);
return -1;
}
static int taprio_dump_tc_entries(struct sk_buff *skb,
struct taprio_sched *q,
struct sched_gate_list *sched)
{
struct nlattr *n;
int tc;
for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
if (!n)
return -EMSGSIZE;
if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
sched->max_sdu[tc]))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
goto nla_put_failure;
nla_nest_end(skb, n);
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, n);
return -EMSGSIZE;
}
static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
{
if (val == TAPRIO_STAT_NOT_SET)
return 0;
if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD))
return -EMSGSIZE;
return 0;
}
static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
struct tc_taprio_qopt_offload *offload,
struct tc_taprio_qopt_stats *stats)
{
struct net_device *dev = qdisc_dev(sch);
const struct net_device_ops *ops;
struct sk_buff *skb = d->skb;
struct nlattr *xstats;
int err;
ops = qdisc_dev(sch)->netdev_ops;
/* FIXME I could use qdisc_offload_dump_helper(), but that messes
* with sch->flags depending on whether the device reports taprio
* stats, and I'm not sure whether that's a good idea, considering
* that stats are optional to the offload itself
*/
if (!ops->ndo_setup_tc)
return 0;
memset(stats, 0xff, sizeof(*stats));
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err == -EOPNOTSUPP)
return 0;
if (err)
return err;
xstats = nla_nest_start(skb, TCA_STATS_APP);
if (!xstats)
goto err;
if (taprio_put_stat(skb, stats->window_drops,
TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) ||
taprio_put_stat(skb, stats->tx_overruns,
TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS))
goto err_cancel;
nla_nest_end(skb, xstats);
return 0;
err_cancel:
nla_nest_cancel(skb, xstats);
err:
return -EMSGSIZE;
}
static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct tc_taprio_qopt_offload offload = {
.cmd = TAPRIO_CMD_STATS,
};
return taprio_dump_xstats(sch, d, &offload, &offload.stats);
}
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct sched_gate_list *oper, *admin;
struct tc_mqprio_qopt opt = { 0 };
struct nlattr *nest, *sched_nest;
oper = rtnl_dereference(q->oper_sched);
admin = rtnl_dereference(q->admin_sched);
mqprio_qopt_reconstruct(dev, &opt);
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!nest)
goto start_error;
if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
goto options_error;
if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
goto options_error;
if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
goto options_error;
if (q->txtime_delay &&
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
if (oper && taprio_dump_tc_entries(skb, q, oper))
goto options_error;
if (oper && dump_schedule(skb, oper))
goto options_error;
if (!admin)
goto done;
sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
if (!sched_nest)
goto options_error;
if (dump_schedule(skb, admin))
goto admin_error;
nla_nest_end(skb, sched_nest);
done:
return nla_nest_end(skb, nest);
admin_error:
nla_nest_cancel(skb, sched_nest);
options_error:
nla_nest_cancel(skb, nest);
start_error:
return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx = cl - 1;
if (ntx >= dev->num_tx_queues)
return NULL;
return q->qdiscs[ntx];
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
unsigned int ntx = TC_H_MIN(classid);
if (!taprio_queue_get(sch, ntx))
return 0;
return ntx;
}
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct Qdisc *child = taprio_leaf(sch, cl);
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle |= TC_H_MIN(cl);
tcm->tcm_info = child->handle;
return 0;
}
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
__releases(d->lock)
__acquires(d->lock)
{
struct Qdisc *child = taprio_leaf(sch, cl);
struct tc_taprio_qopt_offload offload = {
.cmd = TAPRIO_CMD_QUEUE_STATS,
.queue_stats = {
.queue = cl - 1,
},
};
if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 ||
qdisc_qstats_copy(d, child) < 0)
return -1;
return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats);
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct net_device *dev = qdisc_dev(sch);
unsigned long ntx;
if (arg->stop)
return;
arg->count = arg->skip;
for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
break;
}
}
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
struct tcmsg *tcm)
{
return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
.graft = taprio_graft,
.leaf = taprio_leaf,
.find = taprio_find,
.walk = taprio_walk,
.dump = taprio_dump_class,
.dump_stats = taprio_dump_class_stats,
.select_queue = taprio_select_queue,
};
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.cl_ops = &taprio_class_ops,
.id = "taprio",
.priv_size = sizeof(struct taprio_sched),
.init = taprio_init,
.change = taprio_change,
.destroy = taprio_destroy,
.reset = taprio_reset,
.attach = taprio_attach,
.peek = taprio_peek,
.dequeue = taprio_dequeue,
.enqueue = taprio_enqueue,
.dump = taprio_dump,
.dump_stats = taprio_dump_stats,
.owner = THIS_MODULE,
};
static struct notifier_block taprio_device_notifier = {
.notifier_call = taprio_dev_notifier,
};
static int __init taprio_module_init(void)
{
int err = register_netdevice_notifier(&taprio_device_notifier);
if (err)
return err;
return register_qdisc(&taprio_qdisc_ops);
}
static void __exit taprio_module_exit(void)
{
unregister_qdisc(&taprio_qdisc_ops);
unregister_netdevice_notifier(&taprio_device_notifier);
}
module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
| linux-master | net/sched/sch_taprio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*
 * The filters are packed into hash tables of key nodes
* with a set of 32bit key/mask pairs at every node.
* Nodes reference next level hash tables etc.
*
* This scheme is the best universal classifier I managed to
* invent; it is not super-fast, but it is not slow (provided you
* program it correctly), and general enough. And its relative
* speed grows as the number of rules becomes larger.
*
* It seems that it represents the best middle point between
* speed and manageability both by human and by machine.
*
* It is especially useful for link sharing combined with QoS;
* pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, such as cls_rsvp.c.
*
* nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
#include <net/tc_wrapper.h>
struct tc_u_knode {
struct tc_u_knode __rcu *next;
u32 handle;
struct tc_u_hnode __rcu *ht_up;
struct tcf_exts exts;
int ifindex;
u8 fshift;
struct tcf_result res;
struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt __percpu *pf;
#endif
u32 flags;
unsigned int in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
u32 val;
u32 mask;
u32 __percpu *pcpu_success;
#endif
struct rcu_work rwork;
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
*/
struct tc_u32_sel sel;
};
struct tc_u_hnode {
struct tc_u_hnode __rcu *next;
u32 handle;
u32 prio;
int refcnt;
unsigned int divisor;
struct idr handle_idr;
bool is_root;
struct rcu_head rcu;
u32 flags;
/* The 'ht' field MUST be the last field in structure to allow for
* more entries allocated at end of structure.
*/
struct tc_u_knode __rcu *ht[];
};
struct tc_u_common {
struct tc_u_hnode __rcu *hlist;
void *ptr;
int refcnt;
struct idr handle_idr;
struct hlist_node hnode;
long knodes;
};
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
{
unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
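/* Illustrative worked example (editor's note, not part of the original
 * source): with sel->hmask = htonl(0x0000ff00), u32_change() computes
 * fshift = ffs(ntohl(hmask)) - 1 = 8. For a key word whose host-order
 * value is 0x0a00b100, u32_hash_fold() returns
 * (0x0a00b100 & 0x0000ff00) >> 8 = 0xb1, which u32_classify() then
 * ANDs with ht->divisor to select a bucket.
 */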
TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb,
const struct tcf_proto *tp,
struct tcf_result *res)
{
struct {
struct tc_u_knode *knode;
unsigned int off;
} stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
int off2 = 0;
int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
int j;
#endif
int i, r;
next_ht:
n = rcu_dereference_bh(ht->ht[sel]);
next_knode:
if (n) {
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->rcnt);
j = 0;
#endif
if (tc_skip_sw(n->flags)) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
#ifdef CONFIG_CLS_U32_MARK
if ((skb->mark & n->mask) != n->val) {
n = rcu_dereference_bh(n->next);
goto next_knode;
} else {
__this_cpu_inc(*n->pcpu_success);
}
#endif
for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, hdata;
if (skb_headroom(skb) + toff > INT_MAX)
goto out;
data = skb_header_pointer(skb, toff, 4, &hdata);
if (!data)
goto out;
if ((*data ^ key->val) & key->mask) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->kcnts[j]);
j++;
#endif
}
ht = rcu_dereference_bh(n->ht_down);
if (!ht) {
check_terminal:
if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
if (!tcf_match_indev(skb, n->ifindex)) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->rhit);
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
return r;
}
n = rcu_dereference_bh(n->next);
goto next_knode;
}
/* PUSH */
if (sdepth >= TC_U32_MAXDEPTH)
goto deadloop;
stack[sdepth].knode = n;
stack[sdepth].off = off;
sdepth++;
ht = rcu_dereference_bh(n->ht_down);
sel = 0;
if (ht->divisor) {
__be32 *data, hdata;
data = skb_header_pointer(skb, off + n->sel.hoff, 4,
&hdata);
if (!data)
goto out;
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, hdata;
data = skb_header_pointer(skb,
off + n->sel.offoff,
2, &hdata);
if (!data)
goto out;
off2 += ntohs(n->sel.offmask & *data) >>
n->sel.offshift;
}
off2 &= ~3;
}
if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
if (off < skb->len)
goto next_ht;
}
/* POP */
if (sdepth--) {
n = stack[sdepth].knode;
ht = rcu_dereference_bh(n->ht_up);
off = stack[sdepth].off;
goto check_terminal;
}
out:
return -1;
deadloop:
net_warn_ratelimited("cls_u32: dead loop\n");
return -1;
}
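/* Illustrative worked example (editor's note, not part of the original
 * source) of the variable-offset handling above: with sel.off = 0,
 * sel.offoff pointing at the first two bytes of an IPv4 header,
 * sel.offmask = htons(0x0f00) and sel.offshift = 6, a header starting
 * with 0x4500 (IHL = 5) gives ntohs(offmask & *data) = 0x0500, so
 * off2 = 3 + (0x0500 >> 6) = 3 + 20 = 23, and off2 &= ~3 rounds this
 * down to 20 - the IPv4 header length, i.e. the offset of the
 * transport header relative to 'off'.
 */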
static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next))
if (ht->handle == handle)
break;
return ht;
}
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
if (sel > ht->divisor)
goto out;
for (n = rtnl_dereference(ht->ht[sel]);
n;
n = rtnl_dereference(n->next))
if (n->handle == handle)
break;
out:
return n;
}
static void *u32_get(struct tcf_proto *tp, u32 handle)
{
struct tc_u_hnode *ht;
struct tc_u_common *tp_c = tp->data;
if (TC_U32_HTID(handle) == TC_U32_ROOT)
ht = rtnl_dereference(tp->root);
else
ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
if (!ht)
return NULL;
if (TC_U32_KEY(handle) == 0)
return ht;
return u32_lookup_key(ht, handle);
}
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
if (id < 0)
return 0;
return (id | 0x800U) << 20;
}
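/* Illustrative worked example (editor's note, not part of the original
 * source): handles are laid out as htid(12b):bucketid(8b):nodeid(12b).
 * gen_new_htid() allocates an id in [1, 0x7FF), so id = 1 yields
 * (1 | 0x800) << 20 = 0x80100000: a hash table handle with zero bucket
 * and node fields. The root table created by u32_init() sits at
 * 0x80000000.
 */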
static struct hlist_head *tc_u_common_hash;
#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
struct tcf_block *block = tp->chain->block;
/* The block sharing is currently supported only
* for classless qdiscs. In that case we use block
* for tc_u_common identification. In case the
* block is not shared, block->q is a valid pointer
* and we can use that. That works for classful qdiscs.
*/
if (tcf_block_shared(block))
return block;
else
return block->q;
}
static struct hlist_head *tc_u_hash(void *key)
{
return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}
static struct tc_u_common *tc_u_common_find(void *key)
{
struct tc_u_common *tc;
hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
if (tc->ptr == key)
return tc;
}
return NULL;
}
static int u32_init(struct tcf_proto *tp)
{
struct tc_u_hnode *root_ht;
void *key = tc_u_common_ptr(tp);
struct tc_u_common *tp_c = tc_u_common_find(key);
root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
if (root_ht == NULL)
return -ENOBUFS;
root_ht->refcnt++;
root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
root_ht->prio = tp->prio;
root_ht->is_root = true;
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
}
tp_c->ptr = key;
INIT_HLIST_NODE(&tp_c->hnode);
idr_init(&tp_c->handle_idr);
hlist_add_head(&tp_c->hnode, tc_u_hash(key));
}
tp_c->refcnt++;
RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, root_ht);
root_ht->refcnt++;
rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
}
static void __u32_destroy_key(struct tc_u_knode *n)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
tcf_exts_destroy(&n->exts);
if (ht && --ht->refcnt == 0)
kfree(ht);
kfree(n);
}
static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
if (free_pf)
free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
if (free_pf)
free_percpu(n->pcpu_success);
#endif
__u32_destroy_key(n);
}
/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
*/
static void u32_delete_key_work(struct work_struct *work)
{
struct tc_u_knode *key = container_of(to_rcu_work(work),
struct tc_u_knode,
rwork);
rtnl_lock();
u32_destroy_key(key, false);
rtnl_unlock();
}
/* u32_delete_key_freepf_work() is the deferred variant that
 * frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_work()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
*/
static void u32_delete_key_freepf_work(struct work_struct *work)
{
struct tc_u_knode *key = container_of(to_rcu_work(work),
struct tc_u_knode,
rwork);
rtnl_lock();
u32_destroy_key(key, true);
rtnl_unlock();
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_knode __rcu **kp;
struct tc_u_knode *pkp;
struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
if (ht) {
kp = &ht->ht[TC_U32_HASH(key->handle)];
for (pkp = rtnl_dereference(*kp); pkp;
kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
if (pkp == key) {
RCU_INIT_POINTER(*kp, key->next);
tp_c->knodes--;
tcf_unbind_filter(tp, &key->res);
idr_remove(&ht->handle_idr, key->handle);
tcf_exts_get_net(&key->exts);
tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
return 0;
}
}
}
WARN_ON(1);
return 0;
}
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
cls_u32.command = TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
cls_u32.hnode.prio = h->prio;
tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
u32 flags, struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
bool skip_sw = tc_skip_sw(flags);
bool offloaded = false;
int err;
tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_NEW_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
cls_u32.hnode.prio = h->prio;
err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
if (err < 0) {
u32_clear_hw_hnode(tp, h, NULL);
return err;
} else if (err > 0) {
offloaded = true;
}
if (skip_sw && !offloaded)
return -EINVAL;
return 0;
}
static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
cls_u32.command = TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle;
tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
&n->flags, &n->in_hw_count, true);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
u32 flags, struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
bool skip_sw = tc_skip_sw(flags);
int err;
tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_REPLACE_KNODE;
cls_u32.knode.handle = n->handle;
cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
cls_u32.knode.val = n->val;
cls_u32.knode.mask = n->mask;
#else
cls_u32.knode.val = 0;
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
&n->flags, &n->in_hw_count, true);
if (err) {
u32_remove_hw_knode(tp, n, NULL);
return err;
}
if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
return -EINVAL;
return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_knode *n;
unsigned int h;
for (h = 0; h <= ht->divisor; h++) {
while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
RCU_INIT_POINTER(ht->ht[h],
rtnl_dereference(n->next));
tp_c->knodes--;
tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n, extack);
idr_remove(&ht->handle_idr, n->handle);
if (tcf_exts_get_net(&n->exts))
tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
else
u32_destroy_key(n, true);
}
}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode __rcu **hn;
struct tc_u_hnode *phn;
WARN_ON(--ht->refcnt);
u32_clear_hnode(tp, ht, extack);
hn = &tp_c->hlist;
for (phn = rtnl_dereference(*hn);
phn;
hn = &phn->next, phn = rtnl_dereference(*hn)) {
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
idr_remove(&tp_c->handle_idr, ht->handle);
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
}
}
return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
WARN_ON(root_ht == NULL);
if (root_ht && --root_ht->refcnt == 1)
u32_destroy_hnode(tp, root_ht, extack);
if (--tp_c->refcnt == 0) {
struct tc_u_hnode *ht;
hlist_del(&tp_c->hnode);
while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
u32_clear_hnode(tp, ht, extack);
RCU_INIT_POINTER(tp_c->hlist, ht->next);
/* u32_destroy_key() will later free ht for us, if it's
* still referenced by some knode
*/
if (--ht->refcnt == 0)
kfree_rcu(ht, rcu);
}
idr_destroy(&tp_c->handle_idr);
kfree(tp_c);
}
tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = arg;
struct tc_u_common *tp_c = tp->data;
int ret = 0;
if (TC_U32_KEY(ht->handle)) {
u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
goto out;
}
if (ht->is_root) {
NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
return -EINVAL;
}
if (ht->refcnt == 1) {
u32_destroy_hnode(tp, ht, extack);
} else {
NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
return -EBUSY;
}
out:
*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
return ret;
}
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
u32 index = htid | 0x800;
u32 max = htid | 0xFFF;
if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
index = htid + 1;
if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
GFP_KERNEL))
index = max;
}
return index;
}
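/* Illustrative worked example (editor's note, not part of the original
 * source): for a table with htid 0x80100000, gen_new_kid() first tries
 * to reserve a node ID in [0x80100800, 0x80100FFF]; if that range is
 * exhausted it retries from 0x80100001, and only when both attempts
 * fail does it fall back to returning the maximum, 0x80100FFF.
 */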
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
[TCA_U32_CLASSID] = { .type = NLA_U32 },
[TCA_U32_HASH] = { .type = NLA_U32 },
[TCA_U32_LINK] = { .type = NLA_U32 },
[TCA_U32_DIVISOR] = { .type = NLA_U32 },
[TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
[TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
[TCA_U32_FLAGS] = { .type = NLA_U32 },
};
static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
struct nlattr **tb)
{
if (tb[TCA_U32_CLASSID])
tcf_unbind_filter(tp, &n->res);
}
static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
unsigned long base, struct nlattr **tb)
{
if (tb[TCA_U32_CLASSID]) {
n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
tcf_bind_filter(tp, &n->res, base);
}
}
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
struct tc_u_knode *n, struct nlattr **tb,
struct nlattr *est, u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
int err, ifindex = -1;
err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
fl_flags, extack);
if (err < 0)
return err;
if (tb[TCA_U32_INDEV]) {
ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
if (ifindex < 0)
return -EINVAL;
}
if (tb[TCA_U32_LINK]) {
u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
struct tc_u_hnode *ht_down = NULL, *ht_old;
if (TC_U32_KEY(handle)) {
NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
return -EINVAL;
}
if (handle) {
ht_down = u32_lookup_ht(tp->data, handle);
if (!ht_down) {
NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
return -EINVAL;
}
if (ht_down->is_root) {
NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
return -EINVAL;
}
ht_down->refcnt++;
}
ht_old = rtnl_dereference(n->ht_down);
rcu_assign_pointer(n->ht_down, ht_down);
if (ht_old)
ht_old->refcnt--;
}
if (ifindex >= 0)
n->ifindex = ifindex;
return 0;
}
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
struct tc_u_knode *n)
{
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
struct tc_u_hnode *ht;
if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
ht = rtnl_dereference(tp->root);
else
ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));
ins = &ht->ht[TC_U32_HASH(n->handle)];
	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
*/
for (pins = rtnl_dereference(*ins); ;
ins = &pins->next, pins = rtnl_dereference(*ins))
if (pins->handle == n->handle)
break;
idr_replace(&ht->handle_idr, n, n->handle);
RCU_INIT_POINTER(n->next, pins->next);
rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
struct tc_u_knode *n)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tc_u32_sel *s = &n->sel;
struct tc_u_knode *new;
new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
if (!new)
return NULL;
RCU_INIT_POINTER(new->next, n->next);
new->handle = n->handle;
RCU_INIT_POINTER(new->ht_up, n->ht_up);
new->ifindex = n->ifindex;
new->fshift = n->fshift;
new->flags = n->flags;
RCU_INIT_POINTER(new->ht_down, ht);
#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made so as not to free the pf memory.
*/
new->pf = n->pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
new->val = n->val;
new->mask = n->mask;
/* Similarly success statistics must be moved as pointers */
new->pcpu_success = n->pcpu_success;
#endif
memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));
if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
kfree(new);
return NULL;
}
/* bump reference count as long as we hold pointer to structure */
if (ht)
ht->refcnt++;
return new;
}
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
struct tc_u32_sel *s;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid, userflags = 0;
size_t sel_size;
int err;
if (!opt) {
if (handle) {
NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
return -EINVAL;
} else {
return 0;
}
}
err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
extack);
if (err < 0)
return err;
if (tb[TCA_U32_FLAGS]) {
userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
if (!tc_flags_valid(userflags)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
return -EINVAL;
}
}
n = *arg;
if (n) {
struct tc_u_knode *new;
if (TC_U32_KEY(n->handle) == 0) {
NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
return -EINVAL;
}
if ((n->flags ^ userflags) &
~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
return -EINVAL;
}
new = u32_init_knode(net, tp, n);
if (!new)
return -ENOMEM;
err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
flags, new->flags, extack);
if (err) {
__u32_destroy_key(new);
return err;
}
u32_bind_filter(tp, new, base, tb);
err = u32_replace_hw_knode(tp, new, flags, extack);
if (err) {
u32_unbind_filter(tp, new, tb);
if (tb[TCA_U32_LINK]) {
struct tc_u_hnode *ht_old;
ht_old = rtnl_dereference(n->ht_down);
if (ht_old)
ht_old->refcnt++;
}
__u32_destroy_key(new);
return err;
}
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
tcf_exts_get_net(&n->exts);
tcf_queue_work(&n->rwork, u32_delete_key_work);
return 0;
}
if (tb[TCA_U32_DIVISOR]) {
unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (!is_power_of_2(divisor)) {
NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
return -EINVAL;
}
if (divisor-- > 0x100) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
return -EINVAL;
}
if (TC_U32_KEY(handle)) {
NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
return -EINVAL;
}
ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
if (handle == 0) {
handle = gen_new_htid(tp->data, ht);
if (handle == 0) {
kfree(ht);
return -ENOMEM;
}
} else {
err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
handle, GFP_KERNEL);
if (err) {
kfree(ht);
return err;
}
}
ht->refcnt = 1;
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
idr_init(&ht->handle_idr);
ht->flags = userflags;
err = u32_replace_hw_hnode(tp, ht, userflags, extack);
if (err) {
idr_remove(&tp_c->handle_idr, handle);
kfree(ht);
return err;
}
RCU_INIT_POINTER(ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, ht);
*arg = ht;
return 0;
}
if (tb[TCA_U32_HASH]) {
htid = nla_get_u32(tb[TCA_U32_HASH]);
if (TC_U32_HTID(htid) == TC_U32_ROOT) {
ht = rtnl_dereference(tp->root);
htid = ht->handle;
} else {
ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
if (!ht) {
NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
return -EINVAL;
}
}
} else {
ht = rtnl_dereference(tp->root);
htid = ht->handle;
}
if (ht->divisor < TC_U32_HASH(htid)) {
NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
return -EINVAL;
}
/* At this point, we need to derive the new handle that will be used to
* uniquely map the identity of this table match entry. The
* identity of the entry that we need to construct is 32 bits made of:
* htid(12b):bucketid(8b):node/entryid(12b)
*
* At this point _we have the table(ht)_ in which we will insert this
* entry. We carry the table's id in variable "htid".
	 * Note that earlier code picked the ht either by a) the user
	 * providing the htid via the TCA_U32_HASH attribute, or b) when no
	 * such attribute is passed, defaulting to the root ht at ID
	 * 0x[800][00][000]. Rule: the root table has a single bucket with
	 * ID 0.
	 * If OTOH the user passed us the htid, they may also pass a
	 * bucketid of choice; 0 is fine. For example, a user htid of
	 * 0x[600][01][000] indicates hash bucketid 1. Rule: the entry/node
	 * ID _cannot_ be passed via the htid, so even if it was non-zero
	 * it will be ignored.
*
* We may also have a handle, if the user passed one. The handle also
* carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
* Rule: the bucketid on the handle is ignored even if one was passed;
* rather the value on "htid" is always assumed to be the bucketid.
*/
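	/* Worked decomposition (editor's note, not part of the original
	 * source): a handle of 0x60001123 splits into
	 * TC_U32_HTID(h) = h & 0xFFF00000 = 0x60000000 (table id),
	 * TC_U32_HASH(h) = (h >> 12) & 0xFF = 0x01 (bucket id) and
	 * TC_U32_NODE(h) = h & 0xFFF = 0x123 (node/entry id).
	 */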
if (handle) {
		/* Rule: the table id in the handle must match the table id in htid */
if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
return -EINVAL;
}
		/* Ok, so far we have a valid htid(12b):bucketid(8b), but we
		 * need to finalize the table entry identification with the
		 * last part - the node/entryid(12b). Rule: a nodeid _cannot
		 * be 0_ for entries. Rule: a nodeid of 0 is reserved only
		 * for tables (see the earlier code which processes the
		 * TCA_U32_DIVISOR attribute).
		 * Rule: the nodeid can only be derived from the handle (and
		 * not from the htid).
		 * Rule: if the handle specifies zero for the node id (for
		 * example 0x60000000), then pick a new nodeid from the pool
		 * of IDs this hash table has been allocating from.
		 * If OTOH it is specified (for example the user passed a
		 * handle such as 0x60000123), then we use it to generate our
		 * final handle, which is used to uniquely identify the
		 * match entry.
*/
if (!TC_U32_NODE(handle)) {
handle = gen_new_kid(ht, htid);
} else {
handle = htid | TC_U32_NODE(handle);
err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
handle, GFP_KERNEL);
if (err)
return err;
}
} else {
/* The user did not give us a handle; lets just generate one
* from the table's pool of nodeids.
*/
handle = gen_new_kid(ht, htid);
}
if (tb[TCA_U32_SEL] == NULL) {
NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
err = -EINVAL;
goto erridr;
}
s = nla_data(tb[TCA_U32_SEL]);
sel_size = struct_size(s, keys, s->nkeys);
if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
err = -EINVAL;
goto erridr;
}
n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
if (n == NULL) {
err = -ENOBUFS;
goto erridr;
}
#ifdef CONFIG_CLS_U32_PERF
n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
__alignof__(struct tc_u32_pcnt));
if (!n->pf) {
err = -ENOBUFS;
goto errfree;
}
#endif
unsafe_memcpy(&n->sel, s, sel_size,
/* A composite flex-array structure destination,
* which was correctly sized with struct_size(),
* bounds-checked against nla_len(), and allocated
* above. */);
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
n->flags = userflags;
err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
if (err < 0)
goto errout;
#ifdef CONFIG_CLS_U32_MARK
n->pcpu_success = alloc_percpu(u32);
if (!n->pcpu_success) {
err = -ENOMEM;
goto errout;
}
if (tb[TCA_U32_MARK]) {
struct tc_u32_mark *mark;
mark = nla_data(tb[TCA_U32_MARK]);
n->val = mark->val;
n->mask = mark->mask;
}
#endif
err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
flags, n->flags, extack);
u32_bind_filter(tp, n, base, tb);
if (err == 0) {
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
err = u32_replace_hw_knode(tp, n, flags, extack);
if (err)
goto errunbind;
if (!tc_in_hw(n->flags))
n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))
if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
break;
RCU_INIT_POINTER(n->next, pins);
rcu_assign_pointer(*ins, n);
tp_c->knodes++;
*arg = n;
return 0;
}
errunbind:
u32_unbind_filter(tp, n, tb);
#ifdef CONFIG_CLS_U32_MARK
free_percpu(n->pcpu_success);
#endif
errout:
tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
free_percpu(n->pf);
#endif
kfree(n);
erridr:
idr_remove(&ht->handle_idr, handle);
return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
unsigned int h;
if (arg->stop)
return;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
if (!tc_cls_stats_dump(tp, arg, ht))
return;
for (h = 0; h <= ht->divisor; h++) {
for (n = rtnl_dereference(ht->ht[h]);
n;
n = rtnl_dereference(n->next)) {
if (!tc_cls_stats_dump(tp, arg, n))
return;
}
}
}
}
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
bool add, flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack)
{
struct tc_cls_u32_offload cls_u32 = {};
int err;
tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = ht->divisor;
cls_u32.hnode.handle = ht->handle;
cls_u32.hnode.prio = ht->prio;
err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
if (err && add && tc_skip_sw(ht->flags))
return err;
return 0;
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
bool add, flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
cls_u32.command = add ?
TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle;
if (add) {
cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
cls_u32.knode.val = n->val;
cls_u32.knode.mask = n->mask;
#else
cls_u32.knode.val = 0;
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;
}
return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
&cls_u32, cb_priv, &n->flags,
&n->in_hw_count);
}
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
unsigned int h;
int err;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
/* When adding filters to a new dev, try to offload the
* hashtable first. When removing, do the filters before the
* hashtable.
*/
if (add && !tc_skip_hw(ht->flags)) {
err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
extack);
if (err)
return err;
}
for (h = 0; h <= ht->divisor; h++) {
for (n = rtnl_dereference(ht->ht[h]);
n;
n = rtnl_dereference(n->next)) {
if (tc_skip_hw(n->flags))
continue;
err = u32_reoffload_knode(tp, n, add, cb,
cb_priv, extack);
if (err)
return err;
}
}
if (!add && !tc_skip_hw(ht->flags))
u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
}
return 0;
}
static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
unsigned long base)
{
struct tc_u_knode *n = fh;
tc_cls_bind_class(classid, cl, q, &n->res, base);
}
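/* Dump a u32 node to user space.  A handle with TC_U32_KEY() == 0
 * denotes a hash table node, for which only the divisor is reported;
 * anything else is a key node, for which the selector, class binding,
 * link, flags, mark and the counters aggregated over all CPUs are
 * emitted.
 */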
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct tc_u_knode *n = fh;
struct tc_u_hnode *ht_up, *ht_down;
struct nlattr *nest;
if (n == NULL)
return skb->len;
t->tcm_handle = n->handle;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
struct tc_u_hnode *ht = fh;
u32 divisor = ht->divisor + 1;
if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
goto nla_put_failure;
} else {
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *gpf;
int cpu;
#endif
if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
&n->sel))
goto nla_put_failure;
ht_up = rtnl_dereference(n->ht_up);
if (ht_up) {
u32 htid = n->handle & 0xFFFFF000;
if (nla_put_u32(skb, TCA_U32_HASH, htid))
goto nla_put_failure;
}
if (n->res.classid &&
nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
goto nla_put_failure;
ht_down = rtnl_dereference(n->ht_down);
if (ht_down &&
nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
goto nla_put_failure;
if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
goto nla_put_failure;
#ifdef CONFIG_CLS_U32_MARK
		if (n->val || n->mask) {
struct tc_u32_mark mark = {.val = n->val,
.mask = n->mask,
.success = 0};
int cpum;
for_each_possible_cpu(cpum) {
__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
mark.success += cnt;
}
if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
goto nla_put_failure;
}
#endif
if (tcf_exts_dump(skb, &n->exts) < 0)
goto nla_put_failure;
if (n->ifindex) {
struct net_device *dev;
dev = __dev_get_by_index(net, n->ifindex);
if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
goto nla_put_failure;
}
#ifdef CONFIG_CLS_U32_PERF
gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
if (!gpf)
goto nla_put_failure;
for_each_possible_cpu(cpu) {
int i;
struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
gpf->rcnt += pf->rcnt;
gpf->rhit += pf->rhit;
for (i = 0; i < n->sel.nkeys; i++)
gpf->kcnts[i] += pf->kcnts[i];
}
if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
gpf, TCA_U32_PAD)) {
kfree(gpf);
goto nla_put_failure;
}
kfree(gpf);
#endif
}
nla_nest_end(skb, nest);
if (TC_U32_KEY(n->handle))
if (tcf_exts_dump_stats(skb, &n->exts) < 0)
goto nla_put_failure;
return skb->len;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.kind = "u32",
.classify = u32_classify,
.init = u32_init,
.destroy = u32_destroy,
.get = u32_get,
.change = u32_change,
.delete = u32_delete,
.walk = u32_walk,
.reoffload = u32_reoffload,
.dump = u32_dump,
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
};
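/* Illustrative tc(8) usage (standard syntax, shown here only as an
 * example, not part of this file):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dst 192.168.0.1/32 flowid 1:10
 *
 * which creates a key node matching the destination address and binds
 * matching packets to class 1:10.
 */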
static int __init init_u32(void)
{
int i, ret;
pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
pr_info(" Performance counters on\n");
#endif
pr_info(" input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
pr_info(" Actions configured\n");
#endif
tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
sizeof(struct hlist_head),
GFP_KERNEL);
if (!tc_u_common_hash)
return -ENOMEM;
for (i = 0; i < U32_HASH_SIZE; i++)
INIT_HLIST_HEAD(&tc_u_common_hash[i]);
ret = register_tcf_proto_ops(&cls_u32_ops);
if (ret)
kvfree(tc_u_common_hash);
return ret;
}
static void __exit exit_u32(void)
{
unregister_tcf_proto_ops(&cls_u32_ops);
kvfree(tc_u_common_hash);
}
module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
| linux-master | net/sched/cls_u32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/em_u32.c U32 Ematch
*
* Authors: Thomas Graf <[email protected]>
* Alexey Kuznetsov, <[email protected]>
*
* Based on net/sched/cls_u32.c
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
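/* Match one 32-bit word in the packet.  The word sits at the network
 * header (or at the offset the ematch caller supplied via info->ptr),
 * plus a variable part taken from info->nexthdr masked with
 * key->offmask, plus the fixed key->off.  It matches when all bits
 * selected by key->mask equal key->val, i.e. ((word ^ val) & mask) == 0.
 */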
static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct tc_u32_key *key = (struct tc_u32_key *) em->data;
const unsigned char *ptr = skb_network_header(skb);
if (info) {
if (info->ptr)
ptr = info->ptr;
ptr += (info->nexthdr & key->offmask);
}
ptr += key->off;
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {
.kind = TCF_EM_U32,
.datalen = sizeof(struct tc_u32_key),
.match = em_u32_match,
.owner = THIS_MODULE,
.link = LIST_HEAD_INIT(em_u32_ops.link)
};
static int __init init_em_u32(void)
{
return tcf_em_register(&em_u32_ops);
}
static void __exit exit_em_u32(void)
{
tcf_em_unregister(&em_u32_ops);
}
MODULE_LICENSE("GPL");
module_init(init_em_u32);
module_exit(exit_em_u32);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_U32);
| linux-master | net/sched/em_u32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* net/sched/sch_mq.c Classful multiqueue dummy scheduler
*
* Copyright (c) 2009 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
struct mq_sched {
struct Qdisc **qdiscs;
};
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
struct net_device *dev = qdisc_dev(sch);
struct tc_mq_qopt_offload opt = {
.command = cmd,
.handle = sch->handle,
};
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}
static int mq_offload_stats(struct Qdisc *sch)
{
struct tc_mq_qopt_offload opt = {
.command = TC_MQ_STATS,
.handle = sch->handle,
.stats = {
.bstats = &sch->bstats,
.qstats = &sch->qstats,
},
};
return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}
static void mq_destroy(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mq_sched *priv = qdisc_priv(sch);
unsigned int ntx;
mq_offload(sch, TC_MQ_DESTROY);
if (!priv->qdiscs)
return;
for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
qdisc_put(priv->qdiscs[ntx]);
kfree(priv->qdiscs);
}
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct mq_sched *priv = qdisc_priv(sch);
struct netdev_queue *dev_queue;
struct Qdisc *qdisc;
unsigned int ntx;
if (sch->parent != TC_H_ROOT)
return -EOPNOTSUPP;
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
if (!priv->qdiscs)
return -ENOMEM;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
dev_queue = netdev_get_tx_queue(dev, ntx);
qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)),
extack);
if (!qdisc)
return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
mq_offload(sch, TC_MQ_CREATE);
return 0;
}
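/* Graft the qdiscs pre-allocated in mq_init() onto their tx queues.
 * Ownership passes to the queues, so the temporary array is freed here.
 */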
static void mq_attach(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mq_sched *priv = qdisc_priv(sch);
struct Qdisc *qdisc, *old;
unsigned int ntx;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = priv->qdiscs[ntx];
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
if (old)
qdisc_put(old);
#ifdef CONFIG_NET_SCHED
if (ntx < dev->real_num_tx_queues)
qdisc_hash_add(qdisc, false);
#endif
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
}
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *qdisc;
unsigned int ntx;
sch->q.qlen = 0;
gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));
	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to the counters in-band, and locking
	 * qdisc totals are added at the end.
	 */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
spin_lock_bh(qdisc_lock(qdisc));
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
return mq_offload_stats(sch);
}
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
struct net_device *dev = qdisc_dev(sch);
unsigned long ntx = cl - 1;
if (ntx >= dev->num_tx_queues)
return NULL;
return netdev_get_tx_queue(dev, ntx);
}
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
struct tcmsg *tcm)
{
return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
struct tc_mq_qopt_offload graft_offload;
struct net_device *dev = qdisc_dev(sch);
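	/* Quiesce the device while swapping the child qdisc so no packets
	 * are in flight through either the old or the new qdisc during
	 * the graft.
	 */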
if (dev->flags & IFF_UP)
dev_deactivate(dev);
*old = dev_graft_qdisc(dev_queue, new);
if (new)
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
graft_offload.handle = sch->handle;
graft_offload.graft_params.queue = cl - 1;
graft_offload.graft_params.child_handle = new ? new->handle : 0;
graft_offload.command = TC_MQ_GRAFT;
qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
TC_SETUP_QDISC_MQ, &graft_offload, extack);
return 0;
}
static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
return rtnl_dereference(dev_queue->qdisc_sleeping);
}
static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
unsigned int ntx = TC_H_MIN(classid);
if (!mq_queue_get(sch, ntx))
return 0;
return ntx;
}
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle |= TC_H_MIN(cl);
tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
return 0;
}
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = rtnl_dereference(dev_queue->qdisc_sleeping);
if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
}
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx;
if (arg->stop)
return;
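	/* Queues below arg->skip are skipped by starting the loop there;
	 * prime arg->count to match so tc_qdisc_stats_dump()'s bookkeeping
	 * stays consistent.
	 */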
arg->count = arg->skip;
for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
break;
}
}
static const struct Qdisc_class_ops mq_class_ops = {
.select_queue = mq_select_queue,
.graft = mq_graft,
.leaf = mq_leaf,
.find = mq_find,
.walk = mq_walk,
.dump = mq_dump_class,
.dump_stats = mq_dump_class_stats,
};
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
.cl_ops = &mq_class_ops,
.id = "mq",
.priv_size = sizeof(struct mq_sched),
.init = mq_init,
.destroy = mq_destroy,
.attach = mq_attach,
.change_real_num_tx = mq_change_real_num_tx,
.dump = mq_dump,
.owner = THIS_MODULE,
};
| linux-master | net/sched/sch_mq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, Intel Corporation.
*
* Author: Alexander Duyck <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>
#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>
static struct tc_action_ops act_skbedit_ops;
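/* Resolve the tx queue for a packet.  A plain queue_mapping selects one
 * fixed queue; with SKBEDIT_F_TXQ_SKBHASH the skb hash spreads flows
 * across the range [queue_mapping, queue_mapping + mapping_mod - 1].
 * Illustrative tc(8) usage for the plain case (shown only as an
 * example):
 *
 *   tc filter add dev eth0 ... action skbedit queue_mapping 3
 */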
static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
struct sk_buff *skb)
{
u16 queue_mapping = params->queue_mapping;
if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
u32 hash = skb_get_hash(skb);
queue_mapping += hash % params->mapping_mod;
}
return netdev_cap_txqueue(skb->dev, queue_mapping);
}
TC_INDIRECT_SCOPE int tcf_skbedit_act(struct sk_buff *skb,
const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_skbedit *d = to_skbedit(a);
struct tcf_skbedit_params *params;
int action;
tcf_lastuse_update(&d->tcf_tm);
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
params = rcu_dereference_bh(d->params);
action = READ_ONCE(d->tcf_action);
if (params->flags & SKBEDIT_F_PRIORITY)
skb->priority = params->priority;
if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
int wlen = skb_network_offset(skb);
switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
goto err;
skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
break;
case htons(ETH_P_IPV6):
wlen += sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, wlen))
goto err;
skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
break;
}
}
if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
skb->dev->real_num_tx_queues > params->queue_mapping) {
#ifdef CONFIG_NET_EGRESS
netdev_xmit_skip_txqueue(true);
#endif
skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
}
if (params->flags & SKBEDIT_F_MARK) {
skb->mark &= ~params->mask;
skb->mark |= params->mark & params->mask;
}
if (params->flags & SKBEDIT_F_PTYPE)
skb->pkt_type = params->ptype;
return action;
err:
qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
return TC_ACT_SHOT;
}
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
u64 packets, u64 drops,
u64 lastuse, bool hw)
{
struct tcf_skbedit *d = to_skbedit(a);
struct tcf_t *tm = &d->tcf_tm;
tcf_action_update_stats(a, bytes, packets, drops, hw);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
[TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) },
[TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) },
[TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) },
[TCA_SKBEDIT_MARK] = { .len = sizeof(u32) },
[TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) },
[TCA_SKBEDIT_MASK] = { .len = sizeof(u32) },
[TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) },
[TCA_SKBEDIT_QUEUE_MAPPING_MAX] = { .len = sizeof(u16) },
};
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
struct tcf_proto *tp, u32 act_flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
bool bind = act_flags & TCA_ACT_FLAGS_BIND;
struct tcf_skbedit_params *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
struct tc_skbedit *parm;
struct tcf_skbedit *d;
u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
u16 *queue_mapping = NULL, *ptype = NULL;
u16 mapping_mod = 1;
bool exists = false;
int ret = 0, err;
u32 index;
if (nla == NULL)
return -EINVAL;
err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
skbedit_policy, NULL);
if (err < 0)
return err;
if (tb[TCA_SKBEDIT_PARMS] == NULL)
return -EINVAL;
if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
flags |= SKBEDIT_F_PRIORITY;
priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
}
if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
if (is_tcf_skbedit_ingress(act_flags) &&
!(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
return -EOPNOTSUPP;
}
flags |= SKBEDIT_F_QUEUE_MAPPING;
queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
}
if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
if (!skb_pkt_type_ok(*ptype))
return -EINVAL;
flags |= SKBEDIT_F_PTYPE;
}
if (tb[TCA_SKBEDIT_MARK] != NULL) {
flags |= SKBEDIT_F_MARK;
mark = nla_data(tb[TCA_SKBEDIT_MARK]);
}
if (tb[TCA_SKBEDIT_MASK] != NULL) {
flags |= SKBEDIT_F_MASK;
mask = nla_data(tb[TCA_SKBEDIT_MASK]);
}
if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
u16 *queue_mapping_max;
if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
!tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
return -EINVAL;
}
queue_mapping_max =
nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
if (*queue_mapping_max < *queue_mapping) {
NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
return -EINVAL;
}
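			/* mapping_mod is the size of the [min, max] queue
			 * range; tcf_skbedit_hash() uses it as the modulus
			 * for the skb hash.
			 */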
mapping_mod = *queue_mapping_max - *queue_mapping + 1;
flags |= SKBEDIT_F_TXQ_SKBHASH;
}
if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
flags |= SKBEDIT_F_INHERITDSFIELD;
}
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
if (exists && bind)
return 0;
if (!flags) {
if (exists)
tcf_idr_release(*a, bind);
else
tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
&act_skbedit_ops, bind, true, act_flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
}
d = to_skbedit(*a);
ret = ACT_P_CREATED;
} else {
d = to_skbedit(*a);
if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
tcf_idr_release(*a, bind);
return -EEXIST;
}
}
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
err = -ENOMEM;
goto put_chain;
}
params_new->flags = flags;
if (flags & SKBEDIT_F_PRIORITY)
params_new->priority = *priority;
if (flags & SKBEDIT_F_QUEUE_MAPPING) {
params_new->queue_mapping = *queue_mapping;
params_new->mapping_mod = mapping_mod;
}
if (flags & SKBEDIT_F_MARK)
params_new->mark = *mark;
if (flags & SKBEDIT_F_PTYPE)
params_new->ptype = *ptype;
/* default behaviour is to use all the bits */
params_new->mask = 0xffffffff;
if (flags & SKBEDIT_F_MASK)
params_new->mask = *mask;
spin_lock_bh(&d->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params_new = rcu_replace_pointer(d->params, params_new,
lockdep_is_held(&d->tcf_lock));
spin_unlock_bh(&d->tcf_lock);
if (params_new)
kfree_rcu(params_new, rcu);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
return ret;
put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
tcf_idr_release(*a, bind);
return err;
}
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = to_skbedit(a);
struct tcf_skbedit_params *params;
struct tc_skbedit opt = {
.index = d->tcf_index,
.refcnt = refcount_read(&d->tcf_refcnt) - ref,
.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
};
u64 pure_flags = 0;
struct tcf_t t;
spin_lock_bh(&d->tcf_lock);
params = rcu_dereference_protected(d->params,
lockdep_is_held(&d->tcf_lock));
opt.action = d->tcf_action;
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if ((params->flags & SKBEDIT_F_PRIORITY) &&
nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
goto nla_put_failure;
if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
goto nla_put_failure;
if ((params->flags & SKBEDIT_F_MARK) &&
nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
goto nla_put_failure;
if ((params->flags & SKBEDIT_F_PTYPE) &&
nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
goto nla_put_failure;
if ((params->flags & SKBEDIT_F_MASK) &&
nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
goto nla_put_failure;
if (params->flags & SKBEDIT_F_INHERITDSFIELD)
pure_flags |= SKBEDIT_F_INHERITDSFIELD;
if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
params->queue_mapping + params->mapping_mod - 1))
goto nla_put_failure;
pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
}
if (pure_flags != 0 &&
nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
goto nla_put_failure;
tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
goto nla_put_failure;
spin_unlock_bh(&d->tcf_lock);
return skb->len;
nla_put_failure:
spin_unlock_bh(&d->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
static void tcf_skbedit_cleanup(struct tc_action *a)
{
struct tcf_skbedit *d = to_skbedit(a);
struct tcf_skbedit_params *params;
params = rcu_dereference_protected(d->params, 1);
if (params)
kfree_rcu(params, rcu);
}
static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
return nla_total_size(sizeof(struct tc_skbedit))
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}
static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
u32 *index_inc, bool bind,
struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
if (is_tcf_skbedit_mark(act)) {
entry->id = FLOW_ACTION_MARK;
entry->mark = tcf_skbedit_mark(act);
} else if (is_tcf_skbedit_ptype(act)) {
entry->id = FLOW_ACTION_PTYPE;
entry->ptype = tcf_skbedit_ptype(act);
} else if (is_tcf_skbedit_priority(act)) {
entry->id = FLOW_ACTION_PRIORITY;
entry->priority = tcf_skbedit_priority(act);
} else if (is_tcf_skbedit_tx_queue_mapping(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
return -EOPNOTSUPP;
} else if (is_tcf_skbedit_rx_queue_mapping(act)) {
entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
} else if (is_tcf_skbedit_inheritdsfield(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
return -EOPNOTSUPP;
} else {
NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
} else {
struct flow_offload_action *fl_action = entry_data;
if (is_tcf_skbedit_mark(act))
fl_action->id = FLOW_ACTION_MARK;
else if (is_tcf_skbedit_ptype(act))
fl_action->id = FLOW_ACTION_PTYPE;
else if (is_tcf_skbedit_priority(act))
fl_action->id = FLOW_ACTION_PRIORITY;
else if (is_tcf_skbedit_rx_queue_mapping(act))
fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
else
return -EOPNOTSUPP;
}
return 0;
}
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.id = TCA_ID_SKBEDIT,
.owner = THIS_MODULE,
.act = tcf_skbedit_act,
.stats_update = tcf_skbedit_stats_update,
.dump = tcf_skbedit_dump,
.init = tcf_skbedit_init,
.cleanup = tcf_skbedit_cleanup,
.get_fill_size = tcf_skbedit_get_fill_size,
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
static __net_init int skbedit_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
return tc_action_net_init(net, tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
tc_action_net_exit(net_list, act_skbedit_ops.net_id);
}
static struct pernet_operations skbedit_net_ops = {
.init = skbedit_init_net,
.exit_batch = skbedit_exit_net,
.id = &act_skbedit_ops.net_id,
.size = sizeof(struct tc_action_net),
};
MODULE_AUTHOR("Alexander Duyck, <[email protected]>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");
static int __init skbedit_init_module(void)
{
return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}
static void __exit skbedit_cleanup_module(void)
{
tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}
module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
| linux-master | net/sched/act_skbedit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2015 Jiri Pirko <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
#include <net/tc_wrapper.h>
#define ACT_BPF_NAME_LEN 256
struct tcf_bpf_cfg {
struct bpf_prog *filter;
struct sock_filter *bpf_ops;
const char *bpf_name;
u16 bpf_num_ops;
bool is_ebpf;
};
static struct tc_action_ops act_bpf_ops;
TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
const struct tc_action *act,
struct tcf_result *res)
{
bool at_ingress = skb_at_tc_ingress(skb);
struct tcf_bpf *prog = to_bpf(act);
struct bpf_prog *filter;
int action, filter_res;
tcf_lastuse_update(&prog->tcf_tm);
bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
filter = rcu_dereference(prog->filter);
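	/* At ingress skb->data points past the MAC header; push it back
	 * (and pull it again afterwards) so the program sees the frame
	 * from the MAC header on, matching what it sees at egress.
	 */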
if (at_ingress) {
__skb_push(skb, skb->mac_len);
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
}
if (unlikely(!skb->tstamp && skb->mono_delivery_time))
skb->mono_delivery_time = 0;
if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
skb_orphan(skb);
/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the
* default action specified from tc.
*
* In case a different well-known TC_ACT opcode has been
* returned, it will overwrite the default one.
*
* For everything else that is unknown, TC_ACT_UNSPEC is
* returned.
*/
switch (filter_res) {
case TC_ACT_PIPE:
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
case TC_ACT_REDIRECT:
action = filter_res;
break;
case TC_ACT_SHOT:
action = filter_res;
qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
break;
case TC_ACT_UNSPEC:
action = prog->tcf_action;
break;
default:
action = TC_ACT_UNSPEC;
break;
}
return action;
}
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
return !prog->bpf_ops;
}
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
struct sk_buff *skb)
{
struct nlattr *nla;
if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
return -EMSGSIZE;
nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
sizeof(struct sock_filter));
if (nla == NULL)
return -EMSGSIZE;
memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
return 0;
}
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
struct sk_buff *skb)
{
struct nlattr *nla;
if (prog->bpf_name &&
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
return -EMSGSIZE;
nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
int bind, int ref)
{
unsigned char *tp = skb_tail_pointer(skb);
struct tcf_bpf *prog = to_bpf(act);
struct tc_act_bpf opt = {
.index = prog->tcf_index,
.refcnt = refcount_read(&prog->tcf_refcnt) - ref,
.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
};
struct tcf_t tm;
int ret;
spin_lock_bh(&prog->tcf_lock);
opt.action = prog->tcf_action;
if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (tcf_bpf_is_ebpf(prog))
ret = tcf_bpf_dump_ebpf_info(prog, skb);
else
ret = tcf_bpf_dump_bpf_info(prog, skb);
if (ret)
goto nla_put_failure;
tcf_tm_dump(&tm, &prog->tcf_tm);
if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
TCA_ACT_BPF_PAD))
goto nla_put_failure;
spin_unlock_bh(&prog->tcf_lock);
return skb->len;
nla_put_failure:
spin_unlock_bh(&prog->tcf_lock);
nlmsg_trim(skb, tp);
return -1;
}
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
[TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) },
[TCA_ACT_BPF_FD] = { .type = NLA_U32 },
[TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING,
.len = ACT_BPF_NAME_LEN },
[TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_ACT_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
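/* Two configuration paths follow: classic BPF arrives inline as an
 * array of struct sock_filter via TCA_ACT_BPF_OPS and is converted
 * with bpf_prog_create(); eBPF arrives as a program fd of type
 * BPF_PROG_TYPE_SCHED_ACT via TCA_ACT_BPF_FD.
 */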
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
struct sock_filter *bpf_ops;
struct sock_fprog_kern fprog_tmp;
struct bpf_prog *fp;
u16 bpf_size, bpf_num_ops;
int ret;
bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
return -EINVAL;
bpf_size = bpf_num_ops * sizeof(*bpf_ops);
if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
return -EINVAL;
bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
if (bpf_ops == NULL)
return -ENOMEM;
fprog_tmp.len = bpf_num_ops;
fprog_tmp.filter = bpf_ops;
ret = bpf_prog_create(&fp, &fprog_tmp);
if (ret < 0) {
kfree(bpf_ops);
return ret;
}
cfg->bpf_ops = bpf_ops;
cfg->bpf_num_ops = bpf_num_ops;
cfg->filter = fp;
cfg->is_ebpf = false;
return 0;
}
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
struct bpf_prog *fp;
char *name = NULL;
u32 bpf_fd;
bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
if (IS_ERR(fp))
return PTR_ERR(fp);
if (tb[TCA_ACT_BPF_NAME]) {
name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
if (!name) {
bpf_prog_put(fp);
return -ENOMEM;
}
}
cfg->bpf_name = name;
cfg->filter = fp;
cfg->is_ebpf = true;
return 0;
}
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
struct bpf_prog *filter = cfg->filter;
if (filter) {
if (cfg->is_ebpf)
bpf_prog_put(filter);
else
bpf_prog_destroy(filter);
}
kfree(cfg->bpf_ops);
kfree(cfg->bpf_name);
}
static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
struct tcf_bpf_cfg *cfg)
{
cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
/* updates to prog->filter are prevented, since it's called either
* with tcf lock or during final cleanup in rcu callback
*/
cfg->filter = rcu_dereference_protected(prog->filter, 1);
cfg->bpf_ops = prog->bpf_ops;
cfg->bpf_name = prog->bpf_name;
}
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
struct tcf_chain *goto_ch = NULL;
struct tcf_bpf_cfg cfg, old;
struct tc_act_bpf *parm;
struct tcf_bpf *prog;
bool is_bpf, is_ebpf;
int ret, res = 0;
u32 index;
if (!nla)
return -EINVAL;
ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
act_bpf_policy, NULL);
if (ret < 0)
return ret;
if (!tb[TCA_ACT_BPF_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
index = parm->index;
ret = tcf_idr_check_alloc(tn, &index, act, bind);
if (!ret) {
ret = tcf_idr_create(tn, index, est, act,
&act_bpf_ops, bind, true, flags);
if (ret < 0) {
tcf_idr_cleanup(tn, index);
return ret;
}
res = ACT_P_CREATED;
} else if (ret > 0) {
/* Don't override defaults. */
if (bind)
return 0;
if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
tcf_idr_release(*act, bind);
return -EEXIST;
}
} else {
return ret;
}
ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (ret < 0)
goto release_idr;
is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
is_ebpf = tb[TCA_ACT_BPF_FD];
if (is_bpf == is_ebpf) {
ret = -EINVAL;
goto put_chain;
}
memset(&cfg, 0, sizeof(cfg));
ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
tcf_bpf_init_from_efd(tb, &cfg);
if (ret < 0)
goto put_chain;
prog = to_bpf(*act);
spin_lock_bh(&prog->tcf_lock);
if (res != ACT_P_CREATED)
tcf_bpf_prog_fill_cfg(prog, &old);
prog->bpf_ops = cfg.bpf_ops;
prog->bpf_name = cfg.bpf_name;
if (cfg.bpf_num_ops)
prog->bpf_num_ops = cfg.bpf_num_ops;
goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
rcu_assign_pointer(prog->filter, cfg.filter);
spin_unlock_bh(&prog->tcf_lock);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
if (res != ACT_P_CREATED) {
/* make sure the program being replaced is no longer executing */
synchronize_rcu();
tcf_bpf_cfg_cleanup(&old);
}
return res;
put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
tcf_idr_release(*act, bind);
return ret;
}
static void tcf_bpf_cleanup(struct tc_action *act)
{
struct tcf_bpf_cfg tmp;
tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
tcf_bpf_cfg_cleanup(&tmp);
}
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
.id = TCA_ID_BPF,
.owner = THIS_MODULE,
.act = tcf_bpf_act,
.dump = tcf_bpf_dump,
.cleanup = tcf_bpf_cleanup,
.init = tcf_bpf_init,
.size = sizeof(struct tcf_bpf),
};
static __net_init int bpf_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
return tc_action_net_init(net, tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct list_head *net_list)
{
tc_action_net_exit(net_list, act_bpf_ops.net_id);
}
static struct pernet_operations bpf_net_ops = {
.init = bpf_init_net,
.exit_batch = bpf_exit_net,
.id = &act_bpf_ops.net_id,
.size = sizeof(struct tc_action_net),
};
static int __init bpf_init_module(void)
{
return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}
static void __exit bpf_cleanup_module(void)
{
tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}
module_init(bpf_init_module);
module_exit(bpf_cleanup_module);
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");
| linux-master | net/sched/act_bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mpls.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/tc_act/tc_mpls.h>
#include <net/mpls.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_wrapper.h>
static struct tc_action_ops act_mpls_ops;
#define ACT_MPLS_TTL_DEFAULT 255
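/* Build a label stack entry by overlaying the configured fields onto an
 * existing LSE (or onto zero for a push).  LSE layout (RFC 3032):
 *
 *   bits 31..12  label
 *   bits 11..9   TC (traffic class)
 *   bit  8       S (bottom of stack)
 *   bits 7..0    TTL
 *
 * Fields the user left unset keep their current value; the S bit
 * defaults to set_bos, i.e. it is set when pushing onto a non-MPLS
 * packet.
 */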
static __be32 tcf_mpls_get_lse(struct mpls_shim_hdr *lse,
struct tcf_mpls_params *p, bool set_bos)
{
u32 new_lse = 0;
if (lse)
new_lse = be32_to_cpu(lse->label_stack_entry);
if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET) {
new_lse &= ~MPLS_LS_LABEL_MASK;
new_lse |= p->tcfm_label << MPLS_LS_LABEL_SHIFT;
}
if (p->tcfm_ttl) {
new_lse &= ~MPLS_LS_TTL_MASK;
new_lse |= p->tcfm_ttl << MPLS_LS_TTL_SHIFT;
}
if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET) {
new_lse &= ~MPLS_LS_TC_MASK;
new_lse |= p->tcfm_tc << MPLS_LS_TC_SHIFT;
}
if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET) {
new_lse &= ~MPLS_LS_S_MASK;
new_lse |= p->tcfm_bos << MPLS_LS_S_SHIFT;
} else if (set_bos) {
new_lse |= 1 << MPLS_LS_S_SHIFT;
}
return cpu_to_be32(new_lse);
}
TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb,
const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
__be32 new_lse;
int ret, mac_len;
tcf_lastuse_update(&m->tcf_tm);
bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
	/* Ensure 'data' points at the MAC header prior to calling the MPLS
	 * manipulating functions.
	 */
if (skb_at_tc_ingress(skb)) {
skb_push_rcsum(skb, skb->mac_len);
mac_len = skb->mac_len;
} else {
mac_len = skb_network_offset(skb);
}
ret = READ_ONCE(m->tcf_action);
p = rcu_dereference_bh(m->mpls_p);
switch (p->tcfm_action) {
case TCA_MPLS_ACT_POP:
if (skb_mpls_pop(skb, p->tcfm_proto, mac_len,
skb->dev && skb->dev->type == ARPHRD_ETHER))
goto drop;
break;
case TCA_MPLS_ACT_PUSH:
new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
skb->dev && skb->dev->type == ARPHRD_ETHER))
goto drop;
break;
case TCA_MPLS_ACT_MAC_PUSH:
if (skb_vlan_tag_present(skb)) {
if (__vlan_insert_inner_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb),
ETH_HLEN) < 0)
goto drop;
skb->protocol = skb->vlan_proto;
__vlan_hwaccel_clear_tag(skb);
}
new_lse = tcf_mpls_get_lse(NULL, p, mac_len ||
!eth_p_mpls(skb->protocol));
if (skb_mpls_push(skb, new_lse, p->tcfm_proto, 0, false))
goto drop;
break;
case TCA_MPLS_ACT_MODIFY:
if (!pskb_may_pull(skb,
skb_network_offset(skb) + MPLS_HLEN))
goto drop;
new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
if (skb_mpls_update_lse(skb, new_lse))
goto drop;
break;
case TCA_MPLS_ACT_DEC_TTL:
if (skb_mpls_dec_ttl(skb))
goto drop;
break;
}
if (skb_at_tc_ingress(skb))
skb_pull_rcsum(skb, skb->mac_len);
return ret;
drop:
qstats_drop_inc(this_cpu_ptr(m->common.cpu_qstats));
return TC_ACT_SHOT;
}
static int valid_label(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
const u32 *label = nla_data(attr);
if (nla_len(attr) != sizeof(*label)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
return -EINVAL;
}
if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
return -EINVAL;
}
return 0;
}
static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
[TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
[TCA_MPLS_PROTO] = { .type = NLA_U16 },
[TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
valid_label),
[TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7),
[TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
[TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
};
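/* Illustrative tc(8) usage (standard syntax, shown here only as an
 * example, not part of this file):
 *
 *   tc filter add dev eth0 egress matchall \
 *           action mpls push protocol mpls_uc label 123 ttl 64
 */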
static int tcf_mpls_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_MPLS_MAX + 1];
struct tcf_chain *goto_ch = NULL;
struct tcf_mpls_params *p;
struct tc_mpls *parm;
bool exists = false;
struct tcf_mpls *m;
int ret = 0, err;
u8 mpls_ttl = 0;
u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes");
return -EINVAL;
}
err = nla_parse_nested(tb, TCA_MPLS_MAX, nla, mpls_policy, extack);
if (err < 0)
return err;
if (!tb[TCA_MPLS_PARMS]) {
NL_SET_ERR_MSG_MOD(extack, "No MPLS params");
return -EINVAL;
}
parm = nla_data(tb[TCA_MPLS_PARMS]);
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
if (exists && bind)
return 0;
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, &act_mpls_ops, bind,
true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
tcf_idr_release(*a, bind);
return -EEXIST;
}
/* Verify parameters against action type. */
switch (parm->m_action) {
case TCA_MPLS_ACT_POP:
if (!tb[TCA_MPLS_PROTO]) {
NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop");
err = -EINVAL;
goto release_idr;
}
if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop");
err = -EINVAL;
goto release_idr;
}
if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] ||
tb[TCA_MPLS_BOS]) {
NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop");
err = -EINVAL;
goto release_idr;
}
break;
case TCA_MPLS_ACT_DEC_TTL:
if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] ||
tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) {
NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl");
err = -EINVAL;
goto release_idr;
}
break;
case TCA_MPLS_ACT_PUSH:
case TCA_MPLS_ACT_MAC_PUSH:
if (!tb[TCA_MPLS_LABEL]) {
NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push");
err = -EINVAL;
goto release_idr;
}
if (tb[TCA_MPLS_PROTO] &&
!eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push");
err = -EPROTONOSUPPORT;
goto release_idr;
}
/* Push needs a TTL - if not specified, set a default value. */
if (!tb[TCA_MPLS_TTL]) {
#if IS_ENABLED(CONFIG_MPLS)
mpls_ttl = net->mpls.default_ttl ?
net->mpls.default_ttl : ACT_MPLS_TTL_DEFAULT;
#else
mpls_ttl = ACT_MPLS_TTL_DEFAULT;
#endif
}
break;
case TCA_MPLS_ACT_MODIFY:
if (tb[TCA_MPLS_PROTO]) {
NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify");
err = -EINVAL;
goto release_idr;
}
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action");
err = -EINVAL;
goto release_idr;
}
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
m = to_mpls(*a);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
err = -ENOMEM;
goto put_chain;
}
p->tcfm_action = parm->m_action;
p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
ACT_MPLS_LABEL_NOT_SET;
p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
ACT_MPLS_TC_NOT_SET;
p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
mpls_ttl;
p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
ACT_MPLS_BOS_NOT_SET;
p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
htons(ETH_P_MPLS_UC);
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
p = rcu_replace_pointer(m->mpls_p, p, lockdep_is_held(&m->tcf_lock));
spin_unlock_bh(&m->tcf_lock);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
if (p)
kfree_rcu(p, rcu);
return ret;
put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
tcf_idr_release(*a, bind);
return err;
}
static void tcf_mpls_cleanup(struct tc_action *a)
{
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
p = rcu_dereference_protected(m->mpls_p, 1);
if (p)
kfree_rcu(p, rcu);
}
static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
struct tc_mpls opt = {
.index = m->tcf_index,
.refcnt = refcount_read(&m->tcf_refcnt) - ref,
.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
};
struct tcf_t t;
spin_lock_bh(&m->tcf_lock);
opt.action = m->tcf_action;
p = rcu_dereference_protected(m->mpls_p, lockdep_is_held(&m->tcf_lock));
opt.m_action = p->tcfm_action;
if (nla_put(skb, TCA_MPLS_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET &&
nla_put_u32(skb, TCA_MPLS_LABEL, p->tcfm_label))
goto nla_put_failure;
if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET &&
nla_put_u8(skb, TCA_MPLS_TC, p->tcfm_tc))
goto nla_put_failure;
if (p->tcfm_ttl && nla_put_u8(skb, TCA_MPLS_TTL, p->tcfm_ttl))
goto nla_put_failure;
if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET &&
nla_put_u8(skb, TCA_MPLS_BOS, p->tcfm_bos))
goto nla_put_failure;
if (nla_put_be16(skb, TCA_MPLS_PROTO, p->tcfm_proto))
goto nla_put_failure;
tcf_tm_dump(&t, &m->tcf_tm);
if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD))
goto nla_put_failure;
spin_unlock_bh(&m->tcf_lock);
return skb->len;
nla_put_failure:
spin_unlock_bh(&m->tcf_lock);
nlmsg_trim(skb, b);
return -EMSGSIZE;
}
static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
u32 *index_inc, bool bind,
struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
switch (tcf_mpls_action(act)) {
case TCA_MPLS_ACT_PUSH:
entry->id = FLOW_ACTION_MPLS_PUSH;
entry->mpls_push.proto = tcf_mpls_proto(act);
entry->mpls_push.label = tcf_mpls_label(act);
entry->mpls_push.tc = tcf_mpls_tc(act);
entry->mpls_push.bos = tcf_mpls_bos(act);
entry->mpls_push.ttl = tcf_mpls_ttl(act);
break;
case TCA_MPLS_ACT_POP:
entry->id = FLOW_ACTION_MPLS_POP;
entry->mpls_pop.proto = tcf_mpls_proto(act);
break;
case TCA_MPLS_ACT_MODIFY:
entry->id = FLOW_ACTION_MPLS_MANGLE;
entry->mpls_mangle.label = tcf_mpls_label(act);
entry->mpls_mangle.tc = tcf_mpls_tc(act);
entry->mpls_mangle.bos = tcf_mpls_bos(act);
entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
break;
case TCA_MPLS_ACT_DEC_TTL:
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"dec_ttl\" option is used");
return -EOPNOTSUPP;
case TCA_MPLS_ACT_MAC_PUSH:
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"mac_push\" option is used");
return -EOPNOTSUPP;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported MPLS mode offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
} else {
struct flow_offload_action *fl_action = entry_data;
switch (tcf_mpls_action(act)) {
case TCA_MPLS_ACT_PUSH:
fl_action->id = FLOW_ACTION_MPLS_PUSH;
break;
case TCA_MPLS_ACT_POP:
fl_action->id = FLOW_ACTION_MPLS_POP;
break;
case TCA_MPLS_ACT_MODIFY:
fl_action->id = FLOW_ACTION_MPLS_MANGLE;
break;
default:
return -EOPNOTSUPP;
}
}
return 0;
}
static struct tc_action_ops act_mpls_ops = {
.kind = "mpls",
.id = TCA_ID_MPLS,
.owner = THIS_MODULE,
.act = tcf_mpls_act,
.dump = tcf_mpls_dump,
.init = tcf_mpls_init,
.cleanup = tcf_mpls_cleanup,
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
static __net_init int mpls_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id);
return tc_action_net_init(net, tn, &act_mpls_ops);
}
static void __net_exit mpls_exit_net(struct list_head *net_list)
{
tc_action_net_exit(net_list, act_mpls_ops.net_id);
}
static struct pernet_operations mpls_net_ops = {
.init = mpls_init_net,
.exit_batch = mpls_exit_net,
.id = &act_mpls_ops.net_id,
.size = sizeof(struct tc_action_net),
};
static int __init mpls_init_module(void)
{
return tcf_register_action(&act_mpls_ops, &mpls_net_ops);
}
static void __exit mpls_cleanup_module(void)
{
tcf_unregister_action(&act_mpls_ops, &mpls_net_ops);
}
module_init(mpls_init_module);
module_exit(mpls_cleanup_module);
MODULE_SOFTDEP("post: mpls_gso");
MODULE_AUTHOR("Netronome Systems <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MPLS manipulation actions");
| linux-master | net/sched/act_mpls.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* net/sched/sch_mqprio.c
*
* Copyright (c) 2010 John Fastabend <[email protected]>
*/
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include "sch_mqprio_lib.h"
struct mqprio_sched {
struct Qdisc **qdiscs;
u16 mode;
u16 shaper;
int hw_offload;
u32 flags;
u64 min_rate[TC_QOPT_MAX_QUEUE];
u64 max_rate[TC_QOPT_MAX_QUEUE];
u32 fp[TC_QOPT_MAX_QUEUE];
};
static int mqprio_enable_offload(struct Qdisc *sch,
const struct tc_mqprio_qopt *qopt,
struct netlink_ext_ack *extack)
{
struct mqprio_sched *priv = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt_offload mqprio = {
.qopt = *qopt,
.extack = extack,
};
int err, i;
switch (priv->mode) {
case TC_MQPRIO_MODE_DCB:
if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
return -EINVAL;
break;
case TC_MQPRIO_MODE_CHANNEL:
mqprio.flags = priv->flags;
if (priv->flags & TC_MQPRIO_F_MODE)
mqprio.mode = priv->mode;
if (priv->flags & TC_MQPRIO_F_SHAPER)
mqprio.shaper = priv->shaper;
if (priv->flags & TC_MQPRIO_F_MIN_RATE)
for (i = 0; i < mqprio.qopt.num_tc; i++)
mqprio.min_rate[i] = priv->min_rate[i];
if (priv->flags & TC_MQPRIO_F_MAX_RATE)
for (i = 0; i < mqprio.qopt.num_tc; i++)
mqprio.max_rate[i] = priv->max_rate[i];
break;
default:
return -EINVAL;
}
mqprio_fp_to_offload(priv->fp, &mqprio);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
&mqprio);
if (err)
return err;
priv->hw_offload = mqprio.qopt.hw;
return 0;
}
static void mqprio_disable_offload(struct Qdisc *sch)
{
struct tc_mqprio_qopt_offload mqprio = { { 0 } };
struct mqprio_sched *priv = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
switch (priv->mode) {
case TC_MQPRIO_MODE_DCB:
case TC_MQPRIO_MODE_CHANNEL:
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
&mqprio);
break;
}
}
static void mqprio_destroy(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
unsigned int ntx;
if (priv->qdiscs) {
for (ntx = 0;
ntx < dev->num_tx_queues && priv->qdiscs[ntx];
ntx++)
qdisc_put(priv->qdiscs[ntx]);
kfree(priv->qdiscs);
}
if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
mqprio_disable_offload(sch);
else
netdev_set_num_tc(dev, 0);
}
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
const struct tc_mqprio_caps *caps,
struct netlink_ext_ack *extack)
{
int err;
	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support
	 * a given offload type.
	 */
if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
/* If hardware offload is requested, we will leave 3 options to the
* device driver:
* - populate the queue counts itself (and ignore what was requested)
* - validate the provided queue counts by itself (and apply them)
* - request queue count validation here (and apply them)
*/
err = mqprio_validate_qopt(dev, qopt,
!qopt->hw || caps->validate_queue_counts,
false, extack);
if (err)
return err;
/* If ndo_setup_tc is not present then hardware doesn't support offload
* and we should return an error.
*/
if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
NL_SET_ERR_MSG(extack,
"Device does not support hardware offload");
return -EINVAL;
}
return 0;
}
static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
[TCA_MQPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
TC_QOPT_MAX_QUEUE),
[TCA_MQPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
TC_FP_PREEMPTIBLE),
};
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
[TCA_MQPRIO_MODE] = { .len = sizeof(u16) },
[TCA_MQPRIO_SHAPER] = { .len = sizeof(u16) },
[TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
[TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
[TCA_MQPRIO_TC_ENTRY] = { .type = NLA_NESTED },
};
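/* A TCA_MQPRIO_TC_ENTRY nest carries per-TC attributes; currently this
 * is the frame preemption (FP) status, marking each traffic class as
 * express (TC_FP_EXPRESS) or preemptible (TC_FP_PREEMPTIBLE).
 * Preemption additionally requires MAC merge layer support, checked
 * via ethtool_dev_mm_supported().
 */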
static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
struct nlattr *opt,
unsigned long *seen_tcs,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
int err, tc;
err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
mqprio_tc_entry_policy, extack);
if (err < 0)
return err;
if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
NL_SET_ERR_MSG(extack, "TC entry index missing");
return -EINVAL;
}
tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
if (*seen_tcs & BIT(tc)) {
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
"Duplicate tc entry");
return -EINVAL;
}
*seen_tcs |= BIT(tc);
if (tb[TCA_MQPRIO_TC_ENTRY_FP])
fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);
return 0;
}
static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
int nlattr_opt_len,
struct netlink_ext_ack *extack)
{
struct mqprio_sched *priv = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
bool have_preemption = false;
unsigned long seen_tcs = 0;
u32 fp[TC_QOPT_MAX_QUEUE];
struct nlattr *n;
int tc, rem;
int err = 0;
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
fp[tc] = priv->fp[tc];
nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
continue;
err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
if (err)
goto out;
}
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
priv->fp[tc] = fp[tc];
if (fp[tc] == TC_FP_PREEMPTIBLE)
have_preemption = true;
}
if (have_preemption && !ethtool_dev_mm_supported(dev)) {
NL_SET_ERR_MSG(extack, "Device does not support preemption");
return -EOPNOTSUPP;
}
out:
return err;
}
/* Parse the other netlink attributes that represent the payload of
* TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
*/
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
struct mqprio_sched *priv = qdisc_priv(sch);
struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
struct nlattr *attr;
int i, rem, err;
if (nlattr_opt_len >= nla_attr_size(0)) {
err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
nlattr_opt_len, mqprio_policy,
NULL);
if (err < 0)
return err;
}
if (!qopt->hw) {
NL_SET_ERR_MSG(extack,
"mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
return -EINVAL;
}
if (tb[TCA_MQPRIO_MODE]) {
priv->flags |= TC_MQPRIO_F_MODE;
priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
}
if (tb[TCA_MQPRIO_SHAPER]) {
priv->flags |= TC_MQPRIO_F_SHAPER;
priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
}
if (tb[TCA_MQPRIO_MIN_RATE64]) {
if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
"min_rate accepted only when shaper is in bw_rlimit mode");
return -EINVAL;
}
i = 0;
nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
rem) {
if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
NL_SET_ERR_MSG_ATTR(extack, attr,
"Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
return -EINVAL;
}
if (nla_len(attr) != sizeof(u64)) {
NL_SET_ERR_MSG_ATTR(extack, attr,
"Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
return -EINVAL;
}
if (i >= qopt->num_tc)
break;
priv->min_rate[i] = nla_get_u64(attr);
i++;
}
priv->flags |= TC_MQPRIO_F_MIN_RATE;
}
if (tb[TCA_MQPRIO_MAX_RATE64]) {
if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
"max_rate accepted only when shaper is in bw_rlimit mode");
return -EINVAL;
}
i = 0;
nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
rem) {
if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
NL_SET_ERR_MSG_ATTR(extack, attr,
"Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
return -EINVAL;
}
if (nla_len(attr) != sizeof(u64)) {
NL_SET_ERR_MSG_ATTR(extack, attr,
"Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
return -EINVAL;
}
if (i >= qopt->num_tc)
break;
priv->max_rate[i] = nla_get_u64(attr);
i++;
}
priv->flags |= TC_MQPRIO_F_MAX_RATE;
}
if (tb[TCA_MQPRIO_TC_ENTRY]) {
err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
extack);
if (err)
return err;
}
return 0;
}
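/* Illustrative tc(8) usage (standard syntax, shown here only as an
 * example; assumes a device with at least 12 tx queues):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *           map 0 0 0 1 1 1 2 2 2 0 0 0 0 0 0 0 \
 *           queues 4@0 4@4 4@8 hw 0
 *
 * which maps skb priorities onto three traffic classes, gives each
 * class a contiguous range of tx queues, and keeps the mapping in
 * software (hw 0).
 */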
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
struct netdev_queue *dev_queue;
struct Qdisc *qdisc;
int i, err = -EOPNOTSUPP;
struct tc_mqprio_qopt *qopt = NULL;
struct tc_mqprio_caps caps;
int len, tc;
BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
if (sch->parent != TC_H_ROOT)
return -EOPNOTSUPP;
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
	/* make certain we can allocate enough classids to handle all queues */
if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
return -ENOMEM;
if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
priv->fp[tc] = TC_FP_EXPRESS;
qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
&caps, sizeof(caps));
qopt = nla_data(opt);
if (mqprio_parse_opt(dev, qopt, &caps, extack))
return -EINVAL;
len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
if (len > 0) {
err = mqprio_parse_nlattr(sch, qopt, opt, extack);
if (err)
return err;
}
	/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
if (!priv->qdiscs)
return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
qdisc = qdisc_create_dflt(dev_queue,
get_default_qdisc_ops(dev, i),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)), extack);
if (!qdisc)
return -ENOMEM;
priv->qdiscs[i] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
	/* If the mqprio options indicate that hardware should own the queue
	 * mapping, then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
if (qopt->hw) {
err = mqprio_enable_offload(sch, qopt, extack);
if (err)
return err;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
netdev_set_tc_queue(dev, i,
qopt->count[i], qopt->offset[i]);
}
/* Always use supplied priority mappings */
for (i = 0; i < TC_BITMASK + 1; i++)
netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
sch->flags |= TCQ_F_MQROOT;
return 0;
}
static void mqprio_attach(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
struct Qdisc *qdisc, *old;
unsigned int ntx;
	/* Attach the underlying qdiscs */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = priv->qdiscs[ntx];
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
if (old)
qdisc_put(old);
if (ntx < dev->real_num_tx_queues)
qdisc_hash_add(qdisc, false);
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
}
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
unsigned long cl)
{
struct net_device *dev = qdisc_dev(sch);
unsigned long ntx = cl - 1;
if (ntx >= dev->num_tx_queues)
return NULL;
return netdev_get_tx_queue(dev, ntx);
}
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
if (!dev_queue)
return -EINVAL;
if (dev->flags & IFF_UP)
dev_deactivate(dev);
*old = dev_graft_qdisc(dev_queue, new);
if (new)
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
return 0;
}
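/* Emit the per-traffic-class min/max rate tables as nested netlink
 * attributes, mirroring the format accepted by mqprio_parse_nlattr().
 */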
static int dump_rates(struct mqprio_sched *priv,
struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
struct nlattr *nest;
int i;
if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
if (!nest)
goto nla_put_failure;
for (i = 0; i < opt->num_tc; i++) {
if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
sizeof(priv->min_rate[i]),
&priv->min_rate[i]))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
if (!nest)
goto nla_put_failure;
for (i = 0; i < opt->num_tc; i++) {
if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
sizeof(priv->max_rate[i]),
&priv->max_rate[i]))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
struct sk_buff *skb)
{
struct nlattr *n;
int tc;
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
if (!n)
return -EMSGSIZE;
if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
goto nla_put_failure;
nla_nest_end(skb, n);
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, n);
return -EMSGSIZE;
}
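/* Dump the qdisc configuration and aggregate the statistics across all
 * child qdiscs, taking each child's lock in turn.
 */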
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
struct tc_mqprio_qopt opt = { 0 };
struct Qdisc *qdisc;
unsigned int ntx;
sch->q.qlen = 0;
gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));
/* mqprio supports lockless qdiscs. However, statistics accounting needs
 * to account for all, none, or a mix of locked and unlocked child
 * qdiscs. Per-CPU stats are added to the counters in-band, and totals
 * from locked qdiscs are added at the end.
 */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
spin_lock_bh(qdisc_lock(qdisc));
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
mqprio_qopt_reconstruct(dev, &opt);
opt.hw = priv->hw_offload;
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_MODE) &&
nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
goto nla_put_failure;
if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
priv->flags & TC_MQPRIO_F_MAX_RATE) &&
(dump_rates(priv, &opt, skb) != 0))
goto nla_put_failure;
if (mqprio_dump_tc_entries(priv, skb))
goto nla_put_failure;
return nla_nest_end(skb, nla);
nla_put_failure:
nlmsg_trim(skb, nla);
return -1;
}
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
if (!dev_queue)
return NULL;
return rtnl_dereference(dev_queue->qdisc_sleeping);
}
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx = TC_H_MIN(classid);
/* There are essentially two regions here that have valid classid
* values. The first region will have a classid value of 1 through
* num_tx_queues. All of these are backed by actual Qdiscs.
*/
if (ntx < TC_H_MIN_PRIORITY)
return (ntx <= dev->num_tx_queues) ? ntx : 0;
/* The second region represents the hardware traffic classes. These
* are represented by classid values of TC_H_MIN_PRIORITY through
* TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
*/
return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
if (cl < TC_H_MIN_PRIORITY) {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
struct net_device *dev = qdisc_dev(sch);
int tc = netdev_txq_to_tc(dev, cl - 1);
tcm->tcm_parent = (tc < 0) ? 0 :
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(tc + TC_H_MIN_PRIORITY));
tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
} else {
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_info = 0;
}
tcm->tcm_handle |= TC_H_MIN(cl);
return 0;
}
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
__releases(d->lock)
__acquires(d->lock)
{
if (cl >= TC_H_MIN_PRIORITY) {
int i;
__u32 qlen;
struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_sync bstats;
struct net_device *dev = qdisc_dev(sch);
struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
gnet_stats_basic_sync_init(&bstats);
/* Drop the lock here; it will be reclaimed before touching
 * statistics. This is required because the d->lock we hold
 * here is the lock on dev_queue->qdisc_sleeping, which is
 * also acquired below.
 */
if (d->lock)
spin_unlock_bh(d->lock);
for (i = tc.offset; i < tc.offset + tc.count; i++) {
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
qlen = qdisc_qlen(sch) + qstats.qlen;
/* Reclaim root sleeping lock before completing stats */
if (d->lock)
spin_lock_bh(d->lock);
if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = rtnl_dereference(dev_queue->qdisc_sleeping);
if (gnet_stats_copy_basic(d, sch->cpu_bstats,
&sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
}
return 0;
}
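/* Walk the two class regions: first the virtual per-tc classes starting
 * at TC_H_MIN_PRIORITY, then the per-queue classes numbered from 1.
 */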
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct net_device *dev = qdisc_dev(sch);
unsigned long ntx;
if (arg->stop)
return;
/* Walk hierarchy with a virtual class per tc */
arg->count = arg->skip;
for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
return;
}
/* Pad the values and skip over unused traffic classes */
if (ntx < TC_MAX_QUEUE) {
arg->count = TC_MAX_QUEUE;
ntx = TC_MAX_QUEUE;
}
/* Reset offset, sort out remaining per-queue qdiscs */
for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
if (arg->fn(sch, ntx + 1, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
struct tcmsg *tcm)
{
return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops mqprio_class_ops = {
.graft = mqprio_graft,
.leaf = mqprio_leaf,
.find = mqprio_find,
.walk = mqprio_walk,
.dump = mqprio_dump_class,
.dump_stats = mqprio_dump_class_stats,
.select_queue = mqprio_select_queue,
};
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
.cl_ops = &mqprio_class_ops,
.id = "mqprio",
.priv_size = sizeof(struct mqprio_sched),
.init = mqprio_init,
.destroy = mqprio_destroy,
.attach = mqprio_attach,
.change_real_num_tx = mq_change_real_num_tx,
.dump = mqprio_dump,
.owner = THIS_MODULE,
};
static int __init mqprio_module_init(void)
{
return register_qdisc(&mqprio_qdisc_ops);
}
static void __exit mqprio_module_exit(void)
{
unregister_qdisc(&mqprio_qdisc_ops);
}
module_init(mqprio_module_init);
module_exit(mqprio_module_exit);
MODULE_LICENSE("GPL");
| linux-master | net/sched/sch_mqprio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Alan Cox GW4PTS ([email protected])
* Copyright (C) Terry Dawson VK2KTJ ([email protected])
* Copyright (C) Tomi Manninen OH2BNS ([email protected])
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
static int rose_ndevs = 10;
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);
static const struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
* ROSE network devices are virtual network devices encapsulating ROSE
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;
static void rose_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}
static void rose_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
/*
* Convert a ROSE address into text.
*/
char *rose2asc(char *buf, const rose_address *addr)
{
if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
addr->rose_addr[4] == 0x00) {
strcpy(buf, "*");
} else {
sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
addr->rose_addr[1] & 0xFF,
addr->rose_addr[2] & 0xFF,
addr->rose_addr[3] & 0xFF,
addr->rose_addr[4] & 0xFF);
}
return buf;
}
/*
* Compare two ROSE addresses, 0 == equal.
*/
int rosecmp(const rose_address *addr1, const rose_address *addr2)
{
int i;
for (i = 0; i < 5; i++)
if (addr1->rose_addr[i] != addr2->rose_addr[i])
return 1;
return 0;
}
/*
* Compare two ROSE addresses for only mask digits, 0 == equal.
*/
int rosecmpm(const rose_address *addr1, const rose_address *addr2,
unsigned short mask)
{
unsigned int i, j;
if (mask > 10)
return 1;
for (i = 0; i < mask; i++) {
j = i / 2;
if ((i % 2) != 0) {
if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
return 1;
} else {
if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
return 1;
}
}
return 0;
}
/*
* Socket removal during an interrupt is now safe.
*/
static void rose_remove_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a broken link layer connection to a
* particular neighbour.
*/
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->neighbour = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
rose->neighbour->use--;
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Handle device status changes.
*/
static int rose_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
switch (dev->type) {
case ARPHRD_ROSE:
rose_kill_by_device(dev);
break;
case ARPHRD_AX25:
rose_link_device_down(dev);
rose_rt_device_down(dev);
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void rose_insert_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_add_node(sk, &rose_list);
spin_unlock_bh(&rose_list_lock);
}
/*
* Find a socket that wants to accept the Call Request we just
* received.
*/
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
!rose->source_ndigis && s->sk_state == TCP_LISTEN)
goto found;
}
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
s->sk_state == TCP_LISTEN)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a connected ROSE socket given my LCI and neighbour.
*/
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a unique LCI for a given device.
*/
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
int lci;
if (neigh->dce_mode) {
for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
} else {
for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
}
return 0;
}
/*
* Deferred destroy.
*/
void rose_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void rose_destroy_timer(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
rose_destroy_socket(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void rose_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
rose_remove_socket(sk);
rose_stop_heartbeat(sk);
rose_stop_idletimer(sk);
rose_stop_timer(sk);
rose_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
rose_start_heartbeat(skb->sk);
rose_sk(skb->sk)->state = ROSE_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* ROSE socket object.
*/
static int rose_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int opt;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&opt, optval, sizeof(int)))
return -EFAULT;
switch (optname) {
case ROSE_DEFER:
rose->defer = opt ? 1 : 0;
return 0;
case ROSE_T1:
if (opt < 1)
return -EINVAL;
rose->t1 = opt * HZ;
return 0;
case ROSE_T2:
if (opt < 1)
return -EINVAL;
rose->t2 = opt * HZ;
return 0;
case ROSE_T3:
if (opt < 1)
return -EINVAL;
rose->t3 = opt * HZ;
return 0;
case ROSE_HOLDBACK:
if (opt < 1)
return -EINVAL;
rose->hb = opt * HZ;
return 0;
case ROSE_IDLE:
if (opt < 0)
return -EINVAL;
rose->idle = opt * 60 * HZ;
return 0;
case ROSE_QBITINCL:
rose->qbitincl = opt ? 1 : 0;
return 0;
default:
return -ENOPROTOOPT;
}
}
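/*
 * Return a single ROSE socket option, converting the internal jiffies
 * based timer values back into seconds (minutes for ROSE_IDLE).
 */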
static int rose_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int val = 0;
int len;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case ROSE_DEFER:
val = rose->defer;
break;
case ROSE_T1:
val = rose->t1 / HZ;
break;
case ROSE_T2:
val = rose->t2 / HZ;
break;
case ROSE_T3:
val = rose->t3 / HZ;
break;
case ROSE_HOLDBACK:
val = rose->hb / HZ;
break;
case ROSE_IDLE:
val = rose->idle / (60 * HZ);
break;
case ROSE_QBITINCL:
val = rose->qbitincl;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
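/*
 * Move a bound socket into the listening state, clearing any stale
 * destination address information first.
 */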
static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
lock_sock(sk);
if (sock->state != SS_UNCONNECTED) {
release_sock(sk);
return -EINVAL;
}
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
release_sock(sk);
return 0;
}
release_sock(sk);
return -EOPNOTSUPP;
}
static struct proto rose_proto = {
.name = "ROSE",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rose_sock),
};
static int rose_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct rose_sock *rose;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
if (sk == NULL)
return -ENOMEM;
rose = rose_sk(sk);
sock_init_data(sock, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sock->ops = &rose_proto_ops;
sk->sk_protocol = protocol;
timer_setup(&rose->timer, NULL, 0);
timer_setup(&rose->idletimer, NULL, 0);
rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
rose->state = ROSE_STATE_0;
return 0;
}
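/*
 * Clone a listening socket to service an incoming Call Request; the
 * new socket inherits the listener's timer settings and device.
 */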
static struct sock *rose_make_new(struct sock *osk)
{
struct sock *sk;
struct rose_sock *rose, *orose;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
if (sk == NULL)
return NULL;
rose = rose_sk(sk);
sock_init_data(NULL, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
timer_setup(&rose->timer, NULL, 0);
timer_setup(&rose->idletimer, NULL, 0);
orose = rose_sk(osk);
rose->t1 = orose->t1;
rose->t2 = orose->t2;
rose->t3 = orose->t3;
rose->hb = orose->hb;
rose->idle = orose->idle;
rose->defer = orose->defer;
rose->device = orose->device;
if (rose->device)
netdev_hold(rose->device, &rose->dev_tracker, GFP_ATOMIC);
rose->qbitincl = orose->qbitincl;
return sk;
}
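/*
 * Close down a ROSE socket, clearing the virtual circuit first if one
 * is still established.
 */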
static int rose_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rose_sock *rose;
if (sk == NULL)
return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
rose = rose_sk(sk);
switch (rose->state) {
case ROSE_STATE_0:
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_2:
rose->neighbour->use--;
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_1:
case ROSE_STATE_3:
case ROSE_STATE_4:
case ROSE_STATE_5:
rose_clear_queues(sk);
rose_stop_idletimer(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
netdev_put(rose->device, &rose->dev_tracker);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
int n;
if (!sock_flag(sk, SOCK_ZAPPED))
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
return -EADDRNOTAVAIL;
source = &addr->srose_call;
user = ax25_findbyuid(current_euid());
if (user) {
rose->source_call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
return -EACCES;
}
rose->source_call = *source;
}
rose->source_addr = addr->srose_addr;
rose->device = dev;
netdev_tracker_alloc(rose->device, &rose->dev_tracker, GFP_KERNEL);
rose->source_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->source_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->source_ndigis == 1) {
rose->source_digis[0] = addr->srose_digi;
}
}
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
return 0;
}
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
ax25_uid_assoc *user;
int n, err = 0;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
/* Connect completed during an ERESTARTSYS event */
sock->state = SS_CONNECTED;
goto out_release;
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
/* No reconnect on a seqpacket socket */
err = -EISCONN;
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
&diagnostic, 0);
if (!rose->neighbour) {
err = -ENETUNREACH;
goto out_release;
}
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding here may or may not work */
struct net_device *dev;
sock_reset_flag(sk, SOCK_ZAPPED);
dev = rose_dev_first();
if (!dev) {
err = -ENETUNREACH;
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
dev_put(dev);
goto out_release;
}
memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
rose->source_call = user->call;
rose->device = dev;
netdev_tracker_alloc(rose->device, &rose->dev_tracker,
GFP_KERNEL);
ax25_uid_put(user);
rose_insert_socket(sk); /* Finish the bind */
}
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
rose->dest_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->dest_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->dest_ndigis == 1) {
rose->dest_digis[0] = addr->srose_digi;
}
}
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
rose->neighbour->use++;
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
/* Now wait for the connection to complete, unless we are non-blocking */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Clear Request, a timeout or failed routing will take the socket
* to the closed state.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The receive queue is holding sockets ready to use,
* hooked onto the Call Request we saved.
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach the new socket */
skb->sk = NULL;
kfree_skb(skb);
sk_acceptq_removed(sk);
out_release:
release_sock(sk);
return err;
}
static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int n;
memset(srose, 0, sizeof(*srose));
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
for (n = 0; n < rose->dest_ndigis; n++)
srose->srose_digis[n] = rose->dest_digis[n];
} else {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->source_addr;
srose->srose_call = rose->source_call;
srose->srose_ndigis = rose->source_ndigis;
for (n = 0; n < rose->source_ndigis; n++)
srose->srose_digis[n] = rose->source_digis[n];
}
return sizeof(struct full_sockaddr_rose);
}
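/*
 * Handle an incoming Call Request: parse the facilities, find a
 * matching listener and hang a freshly made socket off the skb for
 * rose_accept() to pick up.
 */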
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
struct sock *sk;
struct sock *make;
struct rose_sock *make_rose;
struct rose_facilities_struct facilities;
int n;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the rose frame start
*/
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
return 0;
}
sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
/*
* We can't accept the Call Request.
*/
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
make_rose->dest_addr = facilities.dest_addr;
make_rose->dest_call = facilities.dest_call;
make_rose->dest_ndigis = facilities.dest_ndigis;
for (n = 0 ; n < facilities.dest_ndigis ; n++)
make_rose->dest_digis[n] = facilities.dest_digis[n];
make_rose->source_addr = facilities.source_addr;
make_rose->source_call = facilities.source_call;
make_rose->source_ndigis = facilities.source_ndigis;
for (n = 0 ; n < facilities.source_ndigis ; n++)
make_rose->source_digis[n] = facilities.source_digis[n];
make_rose->neighbour = neigh;
make_rose->device = dev;
/* Caller got a reference for us. */
netdev_tracker_alloc(make_rose->device, &make_rose->dev_tracker,
GFP_ATOMIC);
make_rose->facilities = facilities;
make_rose->neighbour->use++;
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
} else {
rose_write_internal(make, ROSE_CALL_ACCEPTED);
make_rose->state = ROSE_STATE_3;
rose_start_idletimer(make);
}
make_rose->condition = 0x00;
make_rose->vs = 0;
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
sk_acceptq_added(sk);
rose_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
return 1;
}
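/*
 * Send user data on a connected socket, prepending the three byte
 * ROSE network header (and optionally consuming a leading Q Bit
 * byte when ROSE_QBITINCL is set).
 */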
static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
int err;
struct full_sockaddr_rose srose;
struct sk_buff *skb;
unsigned char *asmptr;
int n, size, qbit = 0;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
if (sock_flag(sk, SOCK_ZAPPED))
return -EADDRNOTAVAIL;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
if (rose->neighbour == NULL || rose->device == NULL)
return -ENETUNREACH;
if (usrose != NULL) {
if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
return -EINVAL;
memset(&srose, 0, sizeof(struct full_sockaddr_rose));
memcpy(&srose, usrose, msg->msg_namelen);
if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
return -EISCONN;
if (srose.srose_ndigis != rose->dest_ndigis)
return -EISCONN;
if (srose.srose_ndigis == rose->dest_ndigis) {
for (n = 0 ; n < srose.srose_ndigis ; n++)
if (ax25cmp(&rose->dest_digis[n],
&srose.srose_digis[n]))
return -EISCONN;
}
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
srose.srose_addr = rose->dest_addr;
srose.srose_call = rose->dest_call;
srose.srose_ndigis = rose->dest_ndigis;
for (n = 0 ; n < rose->dest_ndigis ; n++)
srose.srose_digis[n] = rose->dest_digis[n];
}
/* Build a packet */
/* Sanity check the packet size */
if (len > 65535)
return -EMSGSIZE;
size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
return err;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
/*
* Put the data on the end
*/
skb_reset_transport_header(skb);
skb_put(skb, len);
err = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (err) {
kfree_skb(skb);
return err;
}
/*
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
if (rose->qbitincl) {
qbit = skb->data[0];
skb_pull(skb, 1);
}
/*
* Push down the ROSE header
*/
asmptr = skb_push(skb, ROSE_MIN_LEN);
/* Build a ROSE Network header */
asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
asmptr[1] = (rose->lci >> 0) & 0xFF;
asmptr[2] = ROSE_DATA;
if (qbit)
asmptr[0] |= ROSE_Q_BIT;
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
unsigned char header[ROSE_MIN_LEN];
struct sk_buff *skbn;
int frontlen;
int lg;
/* Save a copy of the Header */
skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
skb_pull(skb, ROSE_MIN_LEN);
frontlen = skb_headroom(skb);
while (skb->len > 0) {
if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
kfree_skb(skb);
return err;
}
skbn->sk = sk;
skbn->free = 1;
skbn->arp = 1;
skb_reserve(skbn, frontlen);
lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
/* Copy the user data */
skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skb_pull(skb, lg);
/* Duplicate the Header */
skb_push(skbn, ROSE_MIN_LEN);
skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
if (skb->len > 0)
skbn->data[2] |= M_BIT;
skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb);
} else {
skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
return len;
}
static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first, though.
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags, &er);
if (!skb)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_msg(skb, 0, msg, copied);
if (msg->msg_name) {
struct sockaddr_rose *srose;
DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
msg->msg_name);
memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
srose = msg->msg_name;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
return copied;
}
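/*
 * Socket ioctl handler covering queue introspection, routing table
 * maintenance, cause/diagnostic access, the layer 2 callsign and
 * deferred call acceptance.
 */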
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int __user *) argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int __user *) argp);
}
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRSCLRRT:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return rose_rt_ioctl(cmd, argp);
case SIOCRSGCAUSE: {
struct rose_cause_struct rose_cause;
rose_cause.cause = rose->cause;
rose_cause.diagnostic = rose->diagnostic;
return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
}
case SIOCRSSCAUSE: {
struct rose_cause_struct rose_cause;
if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
return -EFAULT;
rose->cause = rose_cause.cause;
rose->diagnostic = rose_cause.diagnostic;
return 0;
}
case SIOCRSSL2CALL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
return -EFAULT;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
return ax25_listen_register(&rose_callsign, NULL);
return 0;
case SIOCRSGL2CALL:
return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
case SIOCRSACCEPT:
if (rose->state == ROSE_STATE_5) {
rose_write_internal(sk, ROSE_CALL_ACCEPTED);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
}
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
spin_lock_bh(&rose_list_lock);
return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
__releases(rose_list_lock)
{
spin_unlock_bh(&rose_list_lock);
}
static int rose_info_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
if (!dev)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-10s %-9s ",
rose2asc(rsbuf, &rose->dest_addr),
ax2asc(buf, &rose->dest_call));
if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
callsign = "??????-?";
else
callsign = ax2asc(buf, &rose->source_call);
seq_printf(seq,
"%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
rose2asc(rsbuf, &rose->source_addr),
callsign,
devname,
rose->lci & 0x0FFF,
(rose->neighbour) ? rose->neighbour->number : 0,
rose->state,
rose->vs,
rose->vr,
rose->va,
ax25_display_timer(&rose->timer) / HZ,
rose->t1 / HZ,
rose->t2 / HZ,
rose->t3 / HZ,
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
}
return 0;
}
static const struct seq_operations rose_info_seqops = {
.start = rose_info_start,
.next = rose_info_next,
.stop = rose_info_stop,
.show = rose_info_show,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family rose_family_ops = {
.family = PF_ROSE,
.create = rose_create,
.owner = THIS_MODULE,
};
static const struct proto_ops rose_proto_ops = {
.family = PF_ROSE,
.owner = THIS_MODULE,
.release = rose_release,
.bind = rose_bind,
.connect = rose_connect,
.socketpair = sock_no_socketpair,
.accept = rose_accept,
.getname = rose_getname,
.poll = datagram_poll,
.ioctl = rose_ioctl,
.gettstamp = sock_gettstamp,
.listen = rose_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rose_setsockopt,
.getsockopt = rose_getsockopt,
.sendmsg = rose_sendmsg,
.recvmsg = rose_recvmsg,
.mmap = sock_no_mmap,
};
static struct notifier_block rose_dev_notifier = {
.notifier_call = rose_device_event,
};
static struct net_device **dev_rose;
static struct ax25_protocol rose_pid = {
.pid = AX25_P_ROSE,
.func = rose_route_frame
};
static struct ax25_linkfail rose_linkfail_notifier = {
.func = rose_link_failed
};
static int __init rose_proto_init(void)
{
int i;
int rc;
if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
rc = -EINVAL;
goto out;
}
rc = proto_register(&rose_proto, 0);
if (rc != 0)
goto out;
rose_callsign = null_ax25_address;
dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "rose%d", i);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
rc = -ENOMEM;
goto fail;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "ROSE: netdevice registration failed\n");
free_netdev(dev);
goto fail;
}
rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
sock_register(&rose_family_ops);
register_netdevice_notifier(&rose_dev_notifier);
ax25_register_pid(&rose_pid);
ax25_linkfail_register(&rose_linkfail_notifier);
#ifdef CONFIG_SYSCTL
rose_register_sysctl();
#endif
rose_loopback_init();
rose_add_loopback_neigh();
proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
proc_create_seq("rose_neigh", 0444, init_net.proc_net,
&rose_neigh_seqops);
proc_create_seq("rose_nodes", 0444, init_net.proc_net,
&rose_node_seqops);
proc_create_seq("rose_routes", 0444, init_net.proc_net,
&rose_route_seqops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
free_netdev(dev_rose[i]);
}
kfree(dev_rose);
out_proto_unregister:
proto_unregister(&rose_proto);
goto out;
}
module_init(rose_proto_init);
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <[email protected]>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);
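/*
 * Tear down everything registered by rose_proto_init(), in roughly
 * reverse order.
 */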
static void __exit rose_exit(void)
{
int i;
remove_proc_entry("rose", init_net.proc_net);
remove_proc_entry("rose_neigh", init_net.proc_net);
remove_proc_entry("rose_nodes", init_net.proc_net);
remove_proc_entry("rose_routes", init_net.proc_net);
rose_loopback_clear();
rose_rt_free();
ax25_protocol_release(AX25_P_ROSE);
ax25_linkfail_release(&rose_linkfail_notifier);
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
#ifdef CONFIG_SYSCTL
rose_unregister_sysctl();
#endif
unregister_netdevice_notifier(&rose_dev_notifier);
sock_unregister(PF_ROSE);
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev = dev_rose[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_rose);
proto_unregister(&rose_proto);
}
module_exit(rose_exit);
| linux-master | net/rose/af_rose.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*
* Most of this code is based on the SDL diagrams published in the 7th ARRL
* Computer Networking Conference papers. The diagrams have mistakes in them,
* but are mostly correct. Before you modify the code, please read the SDL
* diagrams, as the code is not obvious and probably very easy to break.
*/
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
/*
* State machine for state 1, Awaiting Call Accepted State.
* The handling of the timer(s) is in file rose_timer.c.
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_CALL_ACCEPTED:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Clear Confirmation State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
case ROSE_CLEAR_CONFIRMATION:
rose_disconnect(sk, 0, -1, -1);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
struct rose_sock *rose = rose_sk(sk);
int queued = 0;
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose_requeue_frames(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
case ROSE_RR:
case ROSE_RNR:
if (!rose_validate_nr(sk, nr)) {
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
} else {
rose_frames_acked(sk, nr);
if (frametype == ROSE_RNR) {
rose->condition |= ROSE_COND_PEER_RX_BUSY;
} else {
rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
}
}
break;
case ROSE_DATA: /* XXX */
rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
if (!rose_validate_nr(sk, nr)) {
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
break;
}
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
__sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
/* Should never happen! */
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
break;
}
if (atomic_read(&sk->sk_rmem_alloc) >
(sk->sk_rcvbuf >> 1))
rose->condition |= ROSE_COND_OWN_RX_BUSY;
}
/*
* If the window is full, ack the frame, else start the
* acknowledge hold back timer.
*/
if (((rose->vl + sysctl_rose_window_size) % ROSE_MODULUS) == rose->vr) {
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_stop_timer(sk);
rose_enquiry_response(sk);
} else {
rose->condition |= ROSE_COND_ACK_PENDING;
rose_start_hbtimer(sk);
}
break;
default:
printk(KERN_WARNING "ROSE: unknown %02X in state 3\n", frametype);
break;
}
return queued;
}
/*
* State machine for state 4, Awaiting Reset Confirmation State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
fallthrough;
case ROSE_RESET_CONFIRMATION:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->va = 0;
rose->vr = 0;
rose->vs = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
rose_requeue_frames(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 5, Awaiting Call Acceptance State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
if (frametype == ROSE_CLEAR_REQUEST) {
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose_sk(sk)->neighbour->use--;
}
return 0;
}
/* Higher level upcall for a LAPB frame */
int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
struct rose_sock *rose = rose_sk(sk);
int queued = 0, frametype, ns, nr, q, d, m;
if (rose->state == ROSE_STATE_0)
return 0;
frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
switch (rose->state) {
case ROSE_STATE_1:
queued = rose_state1_machine(sk, skb, frametype);
break;
case ROSE_STATE_2:
queued = rose_state2_machine(sk, skb, frametype);
break;
case ROSE_STATE_3:
queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
break;
case ROSE_STATE_4:
queued = rose_state4_machine(sk, skb, frametype);
break;
case ROSE_STATE_5:
queued = rose_state5_machine(sk, skb, frametype);
break;
}
rose_kick(sk);
return queued;
}
| linux-master | net/rose/rose_in.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <net/ax25.h>
#include <linux/skbuff.h>
#include <net/rose.h>
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
static void rose_loopback_timer(struct timer_list *unused);
void rose_loopback_init(void)
{
skb_queue_head_init(&loopback_queue);
timer_setup(&loopback_timer, rose_loopback_timer, 0);
}
static int rose_loopback_running(void)
{
return timer_pending(&loopback_timer);
}
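/* Queue a copy of the frame for loopback delivery and kick the loopback
 * timer; the original skb is always consumed.
 */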
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
struct sk_buff *skbn = NULL;
if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
skbn = skb_clone(skb, GFP_ATOMIC);
if (skbn) {
consume_skb(skb);
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
} else {
kfree_skb(skb);
}
return 1;
}
static void rose_set_loopback_timer(void)
{
mod_timer(&loopback_timer, jiffies + 10);
}
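/* Deferred delivery of looped-back frames: each queued frame is routed
 * to the matching local socket, or handled as an incoming Call Request;
 * anything else is dropped.
 */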
static void rose_loopback_timer(struct timer_list *unused)
{
struct sk_buff *skb;
struct net_device *dev;
rose_address *dest;
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
int count;
for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
skb = skb_dequeue(&loopback_queue);
if (!skb)
return;
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
}
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
if (frametype == ROSE_CALL_REQUEST &&
(skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
ROSE_CALL_REQ_ADDR_LEN_VAL)) {
kfree_skb(skb);
continue;
}
dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
sk = rose_find_socket(lci_o, rose_loopback_neigh);
if (sk) {
if (rose_process_rx_frame(sk, skb) == 0)
kfree_skb(skb);
continue;
}
if (frametype == ROSE_CALL_REQUEST) {
if (!rose_loopback_neigh->dev &&
!rose_loopback_neigh->loopback) {
kfree_skb(skb);
continue;
}
dev = rose_dev_get(dest);
if (!dev) {
kfree_skb(skb);
continue;
}
if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) {
dev_put(dev);
kfree_skb(skb);
}
} else {
kfree_skb(skb);
}
}
if (!skb_queue_empty(&loopback_queue))
mod_timer(&loopback_timer, jiffies + 1);
}
void __exit rose_loopback_clear(void)
{
struct sk_buff *skb;
del_timer(&loopback_timer);
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb->sk = NULL;
kfree_skb(skb);
}
}
| linux-master | net/rose/rose_loopback.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 1996 Mike Shaver ([email protected])
*/
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <net/ax25.h>
#include <net/rose.h>
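/* Bounds for the ROSE sysctl tunables; the timer limits are expressed
 * in jiffies.
 */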
static int min_timer[] = {1 * HZ};
static int max_timer[] = {300 * HZ};
static int min_idle[] = {0 * HZ};
static int max_idle[] = {65535 * HZ};
static int min_route[1], max_route[] = {1};
static int min_ftimer[] = {60 * HZ};
static int max_ftimer[] = {600 * HZ};
static int min_maxvcs[] = {1}, max_maxvcs[] = {254};
static int min_window[] = {1}, max_window[] = {7};
static struct ctl_table_header *rose_table_header;
static struct ctl_table rose_table[] = {
{
.procname = "restart_request_timeout",
.data = &sysctl_rose_restart_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer
},
{
.procname = "call_request_timeout",
.data = &sysctl_rose_call_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer
},
{
.procname = "reset_request_timeout",
.data = &sysctl_rose_reset_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer
},
{
.procname = "clear_request_timeout",
.data = &sysctl_rose_clear_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer
},
{
.procname = "no_activity_timeout",
.data = &sysctl_rose_no_activity_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_idle,
.extra2 = &max_idle
},
{
.procname = "acknowledge_hold_back_timeout",
.data = &sysctl_rose_ack_hold_back_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer
},
{
.procname = "routing_control",
.data = &sysctl_rose_routing_control,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_route,
.extra2 = &max_route
},
{
.procname = "link_fail_timeout",
.data = &sysctl_rose_link_fail_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ftimer,
.extra2 = &max_ftimer
},
{
.procname = "maximum_virtual_circuits",
.data = &sysctl_rose_maximum_vcs,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_maxvcs,
.extra2 = &max_maxvcs
},
{
.procname = "window_size",
.data = &sysctl_rose_window_size,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_window,
.extra2 = &max_window
},
{ }
};
void __init rose_register_sysctl(void)
{
rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
}
void rose_unregister_sysctl(void)
{
unregister_net_sysctl_table(rose_table_header);
}
| linux-master | net/rose/sysctl_net_rose.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/ax25.h>
#include <net/rose.h>
static int rose_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
if (daddr)
memcpy(buff + 7, daddr, dev->addr_len);
*buff++ = ROSE_GFI | ROSE_Q_BIT;
*buff++ = 0x00;
*buff++ = ROSE_DATA;
*buff++ = 0x7F;
*buff++ = AX25_P_IP;
if (daddr != NULL)
return 37;
return -37;
}
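/*
* Editor's note (not part of the original file): following the usual
* header_ops->create convention, the positive return value is the
* number of header bytes built, while the negated length signals that
* the header could not be completed because no destination address
* was supplied yet.
*/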
static int rose_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
int err;
if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
return 0;
if (dev->flags & IFF_UP) {
err = rose_add_loopback_node((rose_address *)sa->sa_data);
if (err)
return err;
rose_del_loopback_node((const rose_address *)dev->dev_addr);
}
dev_addr_set(dev, sa->sa_data);
return 0;
}
static int rose_open(struct net_device *dev)
{
int err;
err = rose_add_loopback_node((const rose_address *)dev->dev_addr);
if (err)
return err;
netif_start_queue(dev);
return 0;
}
static int rose_close(struct net_device *dev)
{
netif_stop_queue(dev);
rose_del_loopback_node((const rose_address *)dev->dev_addr);
return 0;
}
static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
unsigned int len = skb->len;
if (!netif_running(dev)) {
printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
return NETDEV_TX_BUSY;
}
if (!rose_route_frame(skb, NULL)) {
dev_kfree_skb(skb);
stats->tx_errors++;
return NETDEV_TX_OK;
}
stats->tx_packets++;
stats->tx_bytes += len;
return NETDEV_TX_OK;
}
static const struct header_ops rose_header_ops = {
.create = rose_header,
};
static const struct net_device_ops rose_netdev_ops = {
.ndo_open = rose_open,
.ndo_stop = rose_close,
.ndo_start_xmit = rose_xmit,
.ndo_set_mac_address = rose_set_mac_address,
};
void rose_setup(struct net_device *dev)
{
dev->mtu = ROSE_MAX_PACKET_SIZE - 2;
dev->netdev_ops = &rose_netdev_ops;
dev->header_ops = &rose_header_ops;
dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
dev->addr_len = ROSE_ADDR_LEN;
dev->type = ARPHRD_ROSE;
/* New-style flags. */
dev->flags = IFF_NOARP;
}
| linux-master | net/rose/rose_dev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose);
/*
* This routine purges all of the queues of frames.
*/
void rose_clear_queues(struct sock *sk)
{
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void rose_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct rose_sock *rose = rose_sk(sk);
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (rose->va != nr) {
while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) {
skb = skb_dequeue(&rose->ack_queue);
kfree_skb(skb);
rose->va = (rose->va + 1) % ROSE_MODULUS;
}
}
}
void rose_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by rose_kick. This arrangement handles the possibility of an
* empty output queue.
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
* Validate that the value of nr lies within the window between va and
* vs (modulo 8). Returns 1 (true) if it does, 0 (false) otherwise.
*/
int rose_validate_nr(struct sock *sk, unsigned short nr)
{
struct rose_sock *rose = rose_sk(sk);
unsigned short vc = rose->va;
while (vc != rose->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % ROSE_MODULUS;
}
return nr == rose->vs;
}
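/*
* Worked example (editor's addition, not part of the original file):
* with va = 6 and vs = 1 the loop above walks vc through 6, 7 and 0,
* then tests vs itself, so nr values of 6, 7, 0 and 1 are accepted
* while, say, nr = 3 falls outside the window and is rejected.
*/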
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void rose_write_internal(struct sock *sk, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
int maxfaclen = 0;
int len, faclen;
int reserve;
reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
len = ROSE_MIN_LEN;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
maxfaclen = 256;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_RESET_REQUEST:
len += 2;
break;
}
skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
if (!skb)
return;
/*
* Space for AX.25 header and PID.
*/
skb_reserve(skb, reserve);
dptr = skb_put(skb, len);
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
switch (frametype) {
case ROSE_CALL_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
faclen = rose_create_facilities(dptr, rose);
skb_put(skb, faclen);
dptr += faclen;
break;
case ROSE_CALL_ACCEPTED:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0x00; /* Address length */
*dptr++ = 0; /* Facilities length */
break;
case ROSE_CLEAR_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = rose->cause;
*dptr++ = rose->diagnostic;
break;
case ROSE_RESET_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
break;
case ROSE_RR:
case ROSE_RNR:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr = frametype;
*dptr++ |= (rose->vr << 5) & 0xE0;
break;
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_CONFIRMATION:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
break;
default:
printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype);
kfree_skb(skb);
return;
}
rose_transmit_link(skb, rose->neighbour);
}
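/*
* Worked example (editor's addition, not part of the original file,
* assuming the modulo-8 value ROSE_GFI == 0x10 and ROSE_RR == 0x01):
* an RR on lci = 0x123 with vr = 5 is built above as the three bytes
* 0x11 (GFI | high LCI nibble), 0x23 (low LCI byte) and 0xA1 (ROSE_RR
* with vr packed into bits 5-7).
*/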
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
{
unsigned char *frame;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
case ROSE_CALL_REQUEST:
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_REQUEST:
case ROSE_RESET_CONFIRMATION:
return frame[2];
default:
break;
}
if ((frame[2] & 0x1F) == ROSE_RR ||
(frame[2] & 0x1F) == ROSE_RNR) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
if ((frame[2] & 0x01) == ROSE_DATA) {
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return ROSE_DATA;
}
return ROSE_ILLEGAL;
}
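/*
* Worked example (editor's addition, not part of the original file):
* a third byte of 0xA4 has bit 0 clear, so the frame is decoded as
* ROSE_DATA with *nr = (0xA4 >> 5) & 7 = 5, *ns = (0xA4 >> 1) & 7 = 2,
* and *m taken from the ROSE_M_BIT mask (assumed here to be 0x10,
* which is clear for 0xA4, so *m = 0).
*/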
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
if (len < 2)
return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (len < 3)
return -1;
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
if (len < 4)
return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
if (len < 2)
return -1;
l = p[1];
if (len < 2 + l)
return -1;
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
if (l < 1 + ROSE_ADDR_LEN)
return -1;
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
if (l % AX25_ADDR_LEN)
return -1;
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT) {
if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
} else {
if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (len > 0 && *p != 0x00);
return n;
}
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
if (len < 2)
return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (len < 3)
return -1;
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
if (len < 4)
return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
if (len < 2)
return -1;
l = p[1];
/* Prevent overflows */
if (l < 10 || l > 20)
return -1;
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (len > 0 && *p != 0x00);
return n;
}
int rose_parse_facilities(unsigned char *p, unsigned packet_len,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
return 0;
while (facilities_len >= 3 && *p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
len = 1;
break;
}
if (len < 0)
return 0;
if (WARN_ON(len >= facilities_len))
return 0;
facilities_len -= len + 1;
p += len + 1;
}
return facilities_len == 0;
}
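/*
* Layout sketch (editor's addition, not part of the original file):
* the facilities field is a length byte followed by blocks that each
* begin with a 0x00 marker and a family byte (national or CCITT).
* Within a block the top two bits of every parameter code select its
* size: 0x00 -> one value byte, 0x40 -> two, 0x80 -> three, 0xC0 ->
* variable length with an explicit length byte; that is how the two
* parsers above can skip parameters they do not recognise. For
* instance FAC_NATIONAL_RAND is a 0x40-class code carrying a 16-bit
* value, high byte first.
*/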
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
{
unsigned char *p = buffer + 1;
char *callsign;
char buf[11];
int len, nb;
/* National Facilities */
if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) {
*p++ = 0x00;
*p++ = FAC_NATIONAL;
if (rose->rand != 0) {
*p++ = FAC_NATIONAL_RAND;
*p++ = (rose->rand >> 8) & 0xFF;
*p++ = (rose->rand >> 0) & 0xFF;
}
/* Sent before older facilities */
if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) {
int maxdigi = 0;
*p++ = FAC_NATIONAL_DIGIS;
*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
for (nb = 0 ; nb < rose->source_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p[6] |= AX25_HBIT;
p += AX25_ADDR_LEN;
}
for (nb = 0 ; nb < rose->dest_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p[6] &= ~AX25_HBIT;
p += AX25_ADDR_LEN;
}
}
/* For compatibility */
if (rose->source_ndigis > 0) {
*p++ = FAC_NATIONAL_SRC_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
/* For compatibility */
if (rose->dest_ndigis > 0) {
*p++ = FAC_NATIONAL_DEST_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
}
*p++ = 0x00;
*p++ = FAC_CCITT;
*p++ = FAC_CCITT_DEST_NSAP;
callsign = ax2asc(buf, &rose->dest_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
*p++ = FAC_CCITT_SRC_NSAP;
callsign = ax2asc(buf, &rose->source_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
len = p - buffer;
buffer[0] = len - 1;
return len;
}
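/*
* Layout sketch (editor's addition, not part of the original file) of
* each CCITT NSAP parameter emitted above, for an n-character callsign:
*
* code (FAC_CCITT_DEST_NSAP or FAC_CCITT_SRC_NSAP), n + 10,
* (n + 9) * 2, 0x47, 0x00, 0x11, ROSE_ADDR_LEN * 2,
* the 5-byte ROSE address, then the n callsign bytes.
*
* The "(n + 9) * 2" byte and the 0x47 0x00 0x11 prefix are reproduced
* as-is; the original code itself marks them as unexplained.
*/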
void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
{
struct rose_sock *rose = rose_sk(sk);
rose_stop_timer(sk);
rose_stop_idletimer(sk);
rose_clear_queues(sk);
rose->lci = 0;
rose->state = ROSE_STATE_0;
if (cause != -1)
rose->cause = cause;
if (diagnostic != -1)
rose->diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
| linux-master | net/rose/rose_subr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) 2002 Ralf Baechle DO1GRB ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static void rose_heartbeat_expiry(struct timer_list *t);
static void rose_timer_expiry(struct timer_list *);
static void rose_idletimer_expiry(struct timer_list *);
void rose_start_heartbeat(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
sk->sk_timer.function = rose_heartbeat_expiry;
sk->sk_timer.expires = jiffies + 5 * HZ;
sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);
}
void rose_start_t1timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_t2timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_t3timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_hbtimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
sk_reset_timer(sk, &rose->timer, rose->timer.expires);
}
void rose_start_idletimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->idletimer);
if (rose->idle > 0) {
rose->idletimer.function = rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);
}
}
void rose_stop_heartbeat(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
}
void rose_stop_timer(struct sock *sk)
{
sk_stop_timer(sk, &rose_sk(sk)->timer);
}
void rose_stop_idletimer(struct sock *sk)
{
sk_stop_timer(sk, &rose_sk(sk)->idletimer);
}
static void rose_heartbeat_expiry(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
struct rose_sock *rose = rose_sk(sk);
bh_lock_sock(sk);
switch (rose->state) {
case ROSE_STATE_0:
/* Magic here: if we listen() and a new link dies before it
is accepted() it isn't 'dead', so it doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
bh_unlock_sock(sk);
rose_destroy_socket(sk);
sock_put(sk);
return;
}
break;
case ROSE_STATE_3:
/*
* Check for the state of the receive buffer.
*/
if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(rose->condition & ROSE_COND_OWN_RX_BUSY)) {
rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose->vl = rose->vr;
rose_write_internal(sk, ROSE_RR);
rose_stop_timer(sk); /* HB */
break;
}
break;
}
rose_start_heartbeat(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
static void rose_timer_expiry(struct timer_list *t)
{
struct rose_sock *rose = from_timer(rose, t, timer);
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
switch (rose->state) {
case ROSE_STATE_1: /* T1 */
case ROSE_STATE_4: /* T2 */
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose->state = ROSE_STATE_2;
rose_start_t3timer(sk);
break;
case ROSE_STATE_2: /* T3 */
rose->neighbour->use--;
rose_disconnect(sk, ETIMEDOUT, -1, -1);
break;
case ROSE_STATE_3: /* HB */
if (rose->condition & ROSE_COND_ACK_PENDING) {
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_enquiry_response(sk);
}
break;
}
bh_unlock_sock(sk);
sock_put(sk);
}
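/*
* Editor's note (not part of the original file): a single rose->timer
* backs T1, T2, T3 and HB, and the role it is playing is recovered
* from the connection state in the switch above; an expiry in
* ROSE_STATE_2, for example, can only be the T3 timer.
*/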
static void rose_idletimer_expiry(struct timer_list *t)
{
struct rose_sock *rose = from_timer(rose, t, idletimer);
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
rose_clear_queues(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_sk(sk)->state = ROSE_STATE_2;
rose_start_t3timer(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
sock_put(sk);
}
| linux-master | net/rose/rose_timer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Terry Dawson VK2KTJ ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static unsigned int rose_neigh_no = 1;
static struct rose_node *rose_node_list;
static DEFINE_SPINLOCK(rose_node_list_lock);
static struct rose_neigh *rose_neigh_list;
static DEFINE_SPINLOCK(rose_neigh_list_lock);
static struct rose_route *rose_route_list;
static DEFINE_SPINLOCK(rose_route_list_lock);
struct rose_neigh *rose_loopback_neigh;
/*
* Add a new route to a node, and in the process add the node and the
* neighbour if it is new.
*/
static int __must_check rose_add_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
struct rose_neigh *rose_neigh;
int i, res = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node != NULL && rose_node->loopback) {
res = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
if (rose_neigh == NULL) {
res = -ENOMEM;
goto out;
}
rose_neigh->callsign = rose_route->neighbour;
rose_neigh->digipeat = NULL;
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
skb_queue_head_init(&rose_neigh->queue);
timer_setup(&rose_neigh->ftimer, NULL, 0);
timer_setup(&rose_neigh->t0timer, NULL, 0);
if (rose_route->ndigis != 0) {
rose_neigh->digipeat =
kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
if (rose_neigh->digipeat == NULL) {
kfree(rose_neigh);
res = -ENOMEM;
goto out;
}
rose_neigh->digipeat->ndigi = rose_route->ndigis;
rose_neigh->digipeat->lastrepeat = -1;
for (i = 0; i < rose_route->ndigis; i++) {
rose_neigh->digipeat->calls[i] =
rose_route->digipeaters[i];
rose_neigh->digipeat->repeated[i] = 0;
}
}
rose_neigh->next = rose_neigh_list;
rose_neigh_list = rose_neigh;
}
/*
* This is a new node to be inserted into the list. Find the correct
* insertion point and insert it there. The list is kept in descending
* order of mask size so that, when it is searched later, the first
* match found is also the best (most specific) match.
*/
if (rose_node == NULL) {
rose_tmpn = rose_node_list;
rose_tmpp = NULL;
while (rose_tmpn != NULL) {
if (rose_tmpn->mask > rose_route->mask) {
rose_tmpp = rose_tmpn;
rose_tmpn = rose_tmpn->next;
} else {
break;
}
}
/* create new node */
rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
if (rose_node == NULL) {
res = -ENOMEM;
goto out;
}
rose_node->address = rose_route->address;
rose_node->mask = rose_route->mask;
rose_node->count = 1;
rose_node->loopback = 0;
rose_node->neighbour[0] = rose_neigh;
if (rose_tmpn == NULL) {
if (rose_tmpp == NULL) { /* Empty list */
rose_node_list = rose_node;
rose_node->next = NULL;
} else {
rose_tmpp->next = rose_node;
rose_node->next = NULL;
}
} else {
if (rose_tmpp == NULL) { /* 1st node */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
} else {
rose_tmpp->next = rose_node;
rose_node->next = rose_tmpn;
}
}
rose_neigh->count++;
goto out;
}
/* We have space, slot it in */
if (rose_node->count < 3) {
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
}
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return res;
}
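/*
* Worked example (editor's addition, not part of the original file):
* if the list already holds nodes with masks 10, 7 and 4, a new
* mask-8 node is inserted between the mask-10 and mask-7 entries, so
* a lookup walking the list front to back always finds the most
* specific matching route first.
*/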
/*
* Caller is holding rose_node_list_lock.
*/
static void rose_remove_node(struct rose_node *rose_node)
{
struct rose_node *s;
if ((s = rose_node_list) == rose_node) {
rose_node_list = rose_node->next;
kfree(rose_node);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_node) {
s->next = rose_node->next;
kfree(rose_node);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_neigh_list_lock.
*/
static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
struct rose_neigh *s;
del_timer_sync(&rose_neigh->ftimer);
del_timer_sync(&rose_neigh->t0timer);
skb_queue_purge(&rose_neigh->queue);
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_route_list_lock.
*/
static void rose_remove_route(struct rose_route *rose_route)
{
struct rose_route *s;
if (rose_route->neigh1 != NULL)
rose_route->neigh1->use--;
if (rose_route->neigh2 != NULL)
rose_route->neigh2->use--;
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
kfree(rose_route);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_route) {
s->next = rose_route->next;
kfree(rose_route);
return;
}
s = s->next;
}
}
/*
* "Delete" a node. Strictly speaking, remove a route to a node; the
* node itself is only deleted once no routes to it remain.
*/
static int rose_del_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node;
struct rose_neigh *rose_neigh;
int i, err = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node == NULL || rose_node->loopback) {
err = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
err = -EINVAL;
goto out;
}
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
if (rose_neigh->count == 0 && rose_neigh->use == 0)
rose_remove_neigh(rose_neigh);
rose_node->count--;
if (rose_node->count == 0) {
rose_remove_node(rose_node);
} else {
switch (i) {
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
fallthrough;
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
break;
case 2:
break;
}
}
goto out;
}
}
err = -EINVAL;
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Add the loopback neighbour.
*/
void rose_add_loopback_neigh(void)
{
struct rose_neigh *sn;
rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
if (!rose_loopback_neigh)
return;
sn = rose_loopback_neigh;
sn->callsign = null_ax25_address;
sn->digipeat = NULL;
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
skb_queue_head_init(&sn->queue);
timer_setup(&sn->ftimer, NULL, 0);
timer_setup(&sn->t0timer, NULL, 0);
spin_lock_bh(&rose_neigh_list_lock);
sn->next = rose_neigh_list;
rose_neigh_list = sn;
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* Add a loopback node.
*/
int rose_add_loopback_node(const rose_address *address)
{
struct rose_node *rose_node;
int err = 0;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node != NULL)
goto out;
if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) {
err = -ENOMEM;
goto out;
}
rose_node->address = *address;
rose_node->mask = 10;
rose_node->count = 1;
rose_node->loopback = 1;
rose_node->neighbour[0] = rose_loopback_neigh;
/* Insert at the head of the list; loopback addresses always use mask=10 */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
rose_loopback_neigh->count++;
out:
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Delete a loopback node.
*/
void rose_del_loopback_node(const rose_address *address)
{
struct rose_node *rose_node;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node == NULL)
goto out;
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
out:
spin_unlock_bh(&rose_node_list_lock);
}
/*
* A device has been removed. Remove its routes and neighbours.
*/
void rose_rt_device_down(struct net_device *dev)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->dev != dev)
continue;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
for (i = 0; i < t->count; i++) {
if (t->neighbour[i] != s)
continue;
t->count--;
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
fallthrough;
case 1:
t->neighbour[1] = t->neighbour[2];
break;
case 2:
break;
}
}
if (t->count <= 0)
rose_remove_node(t);
}
rose_remove_neigh(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
}
#if 0 /* Currently unused */
/*
* A device has been removed. Remove its links.
*/
void rose_route_device_down(struct net_device *dev)
{
struct rose_route *s, *rose_route;
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
s = rose_route;
rose_route = rose_route->next;
if (s->neigh1->dev == dev || s->neigh2->dev == dev)
rose_remove_route(s);
}
spin_unlock_bh(&rose_route_list_lock);
}
#endif
/*
* Clear all nodes and neighbours out, except for neighbours with
* active connections going through them.
* Do not clear loopback neighbour and nodes.
*/
static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
if (!t->loopback)
rose_remove_node(t);
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->use == 0 && !s->loopback) {
s->count = 0;
rose_remove_neigh(s);
}
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return 0;
}
/*
* Check that the device given is a valid AX.25 interface that is "up".
* Called with the RTNL held.
*/
static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
return NULL;
}
/*
* Find the first active ROSE device, usually "rose0".
*/
struct net_device *rose_dev_first(void)
{
struct net_device *dev, *first = NULL;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
if (first)
dev_hold(first);
rcu_read_unlock();
return first;
}
/*
* Find the ROSE device for the given address.
*/
struct net_device *rose_dev_get(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE &&
rosecmp(addr, (const rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
}
dev = NULL;
out:
rcu_read_unlock();
return dev;
}
static int rose_dev_exists(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE &&
rosecmp(addr, (const rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
out:
rcu_read_unlock();
return dev != NULL;
}
struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh)
{
struct rose_route *rose_route;
for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next)
if ((rose_route->neigh1 == neigh && rose_route->lci1 == lci) ||
(rose_route->neigh2 == neigh && rose_route->lci2 == lci))
return rose_route;
return NULL;
}
/*
* Find a neighbour or a route given a ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
if (!route_frame) spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
goto out;
}
}
}
}
if (!route_frame) { /* connect request */
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
goto out;
}
failed = 1;
}
}
}
}
if (failed) {
*cause = ROSE_OUT_OF_ORDER;
*diagnostic = 0;
} else {
*cause = ROSE_NOT_OBTAINABLE;
*diagnostic = 0;
}
out:
if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
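/*
* Editor's note (not part of the original file): the first pass above
* only accepts neighbours whose link is already restarted; for a
* connect request a second pass also accepts neighbours whose failure
* timer is not running, and the cause reported on failure depends on
* whether a candidate had to be skipped (ROSE_OUT_OF_ORDER) or no
* route matched at all (ROSE_NOT_OBTAINABLE).
*/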
/*
* Handle the ioctls that control the routing functions.
*/
int rose_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct rose_route_struct rose_route;
struct net_device *dev;
int err;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
return err;
case SIOCRSCLRRT:
return rose_clear_routes();
default:
return -EINVAL;
}
return 0;
}
static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
struct rose_route *rose_route, *s;
rose_neigh->restarted = 0;
rose_stop_t0timer(rose_neigh);
rose_start_ftimer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) ||
(rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) ||
(rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) {
s = rose_route->next;
rose_remove_route(rose_route);
rose_route = s;
continue;
}
if (rose_route->neigh1 == rose_neigh) {
rose_route->neigh1->use--;
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
rose_route->neigh2->use--;
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
rose_route = rose_route->next;
}
spin_unlock_bh(&rose_route_list_lock);
}
/*
* A level 2 link has timed out, so it appears to be a poor link; don't
* use that neighbour again until it has been reset. Blow away all
* through routes and connections that use this link.
*/
void rose_link_failed(ax25_cb *ax25, int reason)
{
struct rose_neigh *rose_neigh;
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (rose_neigh->ax25 == ax25)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* A device has been "downed"; remove its link status. Blow away all
* through routes and connections that use this device.
*/
void rose_link_device_down(struct net_device *dev)
{
struct rose_neigh *rose_neigh;
for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) {
if (rose_neigh->dev == dev) {
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
}
}
/*
* Route a frame to an appropriate AX.25 connection.
* A NULL ax25_cb indicates an internally generated frame.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
struct rose_neigh *rose_neigh, *new_neigh;
struct rose_route *rose_route;
struct rose_facilities_struct facilities;
rose_address *src_addr, *dest_addr;
struct sock *sk;
unsigned short frametype;
unsigned int lci, new_lci;
unsigned char cause, diagnostic;
struct net_device *dev;
int res = 0;
char buf[11];
if (skb->len < ROSE_MIN_LEN)
return res;
if (!ax25)
return rose_loopback_queue(skb, NULL);
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&
(skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
ROSE_CALL_REQ_ADDR_LEN_VAL))
return res;
src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
spin_lock_bh(&rose_neigh_list_lock);
spin_lock_bh(&rose_route_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&ax25->dest_addr, &rose_neigh->callsign) == 0 &&
ax25->ax25_dev->dev == rose_neigh->dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
printk("rose_route : unknown neighbour or device %s\n",
ax2asc(buf, &ax25->dest_addr));
goto out;
}
/*
* Obviously the link is working, halt the ftimer.
*/
rose_stop_ftimer(rose_neigh);
/*
* An LCI of zero is always for us, and it's always a restart
* frame.
*/
if (lci == 0) {
rose_link_rx_restart(skb, rose_neigh, frametype);
goto out;
}
/*
* Find an existing socket.
*/
if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
if (frametype == ROSE_CALL_REQUEST) {
struct rose_sock *rose = rose_sk(sk);
/* Remove an existing unused socket */
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
rose->neighbour->use--;
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
else {
skb_reset_transport_header(skb);
res = rose_process_rx_frame(sk, skb);
goto out;
}
}
/*
* Is it a Call Request, and is it for us?
*/
if (frametype == ROSE_CALL_REQUEST)
if ((dev = rose_dev_get(dest_addr)) != NULL) {
res = rose_rx_call_request(skb, dev, rose_neigh, lci);
dev_put(dev);
goto out;
}
if (!sysctl_rose_routing_control) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 0);
goto out;
}
/*
* Route it to the next in line if we have an entry for it.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->lci1 == lci &&
rose_route->neigh1 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh2 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
if (rose_route->lci2 == lci &&
rose_route->neigh2 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh1 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci1 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh1);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
rose_route = rose_route->next;
}
/*
* We know that:
* 1. The frame isn't for us,
* 2. It isn't "owned" by any existing route.
*/
if (frametype != ROSE_CALL_REQUEST) { /* XXX */
res = 0;
goto out;
}
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
goto out;
}
/*
* Check for routing loops.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->rand == facilities.rand &&
rosecmp(src_addr, &rose_route->src_addr) == 0 &&
ax25cmp(&facilities.dest_call, &rose_route->src_call) == 0 &&
ax25cmp(&facilities.source_call, &rose_route->dest_call) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 120);
goto out;
}
rose_route = rose_route->next;
}
if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
goto out;
}
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
goto out;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
goto out;
}
rose_route->lci1 = lci;
rose_route->src_addr = *src_addr;
rose_route->dest_addr = *dest_addr;
rose_route->src_call = facilities.dest_call;
rose_route->dest_call = facilities.source_call;
rose_route->rand = facilities.rand;
rose_route->neigh1 = rose_neigh;
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
rose_route->neigh1->use++;
rose_route->neigh2->use++;
rose_route->next = rose_route_list;
rose_route_list = rose_route;
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
return res;
}
#ifdef CONFIG_PROC_FS
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_node_list_lock)
{
struct rose_node *rose_node;
int i = 1;
spin_lock_bh(&rose_node_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_node = rose_node_list; rose_node && i < *pos;
rose_node = rose_node->next, ++i);
return (i == *pos) ? rose_node : NULL;
}
static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_node_list
: ((struct rose_node *)v)->next;
}
static void rose_node_stop(struct seq_file *seq, void *v)
__releases(rose_node_list_lock)
{
spin_unlock_bh(&rose_node_list_lock);
}
static int rose_node_show(struct seq_file *seq, void *v)
{
char rsbuf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "address mask n neigh neigh neigh\n");
else {
const struct rose_node *rose_node = v;
seq_printf(seq, "%-10s %04d %d",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask,
rose_node->count);
for (i = 0; i < rose_node->count; i++)
seq_printf(seq, " %05d", rose_node->neighbour[i]->number);
seq_puts(seq, "\n");
}
return 0;
}
const struct seq_operations rose_node_seqops = {
.start = rose_node_start,
.next = rose_node_next,
.stop = rose_node_stop,
.show = rose_node_show,
};
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_neigh_list_lock)
{
struct rose_neigh *rose_neigh;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
rose_neigh = rose_neigh->next, ++i);
return (i == *pos) ? rose_neigh : NULL;
}
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_neigh_list
: ((struct rose_neigh *)v)->next;
}
static void rose_neigh_stop(struct seq_file *seq, void *v)
__releases(rose_neigh_list_lock)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
static int rose_neigh_show(struct seq_file *seq, void *v)
{
char buf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"addr callsign dev count use mode restart t0 tf digipeaters\n");
else {
struct rose_neigh *rose_neigh = v;
/* if (!rose_neigh->loopback) { */
seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
}
seq_puts(seq, "\n");
}
return 0;
}
const struct seq_operations rose_neigh_seqops = {
.start = rose_neigh_start,
.next = rose_neigh_next,
.stop = rose_neigh_stop,
.show = rose_neigh_show,
};
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_route_list_lock)
{
struct rose_route *rose_route;
int i = 1;
spin_lock_bh(&rose_route_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_route = rose_route_list; rose_route && i < *pos;
rose_route = rose_route->next, ++i);
return (i == *pos) ? rose_route : NULL;
}
static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_route_list
: ((struct rose_route *)v)->next;
}
static void rose_route_stop(struct seq_file *seq, void *v)
__releases(rose_route_list_lock)
{
spin_unlock_bh(&rose_route_list_lock);
}
static int rose_route_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"lci address callsign neigh <-> lci address callsign neigh\n");
else {
struct rose_route *rose_route = v;
if (rose_route->neigh1)
seq_printf(seq,
"%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(rsbuf, &rose_route->src_addr),
ax2asc(buf, &rose_route->src_call),
rose_route->neigh1->number);
else
seq_puts(seq,
"000 * * 00000 ");
if (rose_route->neigh2)
seq_printf(seq,
"%3.3X %-10s %-9s %05d\n",
rose_route->lci2,
rose2asc(rsbuf, &rose_route->dest_addr),
ax2asc(buf, &rose_route->dest_call),
rose_route->neigh2->number);
else
seq_puts(seq,
"000 * * 00000\n");
}
return 0;
}
struct seq_operations rose_route_seqops = {
.start = rose_route_start,
.next = rose_route_next,
.stop = rose_route_stop,
.show = rose_route_show,
};
#endif /* CONFIG_PROC_FS */
/*
* Release all memory associated with ROSE routing structures.
*/
void __exit rose_rt_free(void)
{
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
rose_remove_node(t);
}
while (rose_route != NULL) {
u = rose_route;
rose_route = rose_route->next;
rose_remove_route(u);
}
}
| linux-master | net/rose/rose_route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static void rose_ftimer_expiry(struct timer_list *);
static void rose_t0timer_expiry(struct timer_list *);
static void rose_transmit_restart_confirmation(struct rose_neigh *neigh);
static void rose_transmit_restart_request(struct rose_neigh *neigh);
void rose_start_ftimer(struct rose_neigh *neigh)
{
del_timer(&neigh->ftimer);
neigh->ftimer.function = rose_ftimer_expiry;
neigh->ftimer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
add_timer(&neigh->ftimer);
}
static void rose_start_t0timer(struct rose_neigh *neigh)
{
del_timer(&neigh->t0timer);
neigh->t0timer.function = rose_t0timer_expiry;
neigh->t0timer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
add_timer(&neigh->t0timer);
}
void rose_stop_ftimer(struct rose_neigh *neigh)
{
del_timer(&neigh->ftimer);
}
void rose_stop_t0timer(struct rose_neigh *neigh)
{
del_timer(&neigh->t0timer);
}
int rose_ftimer_running(struct rose_neigh *neigh)
{
return timer_pending(&neigh->ftimer);
}
static int rose_t0timer_running(struct rose_neigh *neigh)
{
return timer_pending(&neigh->t0timer);
}
/*
* The ftimer exists only so that rose_ftimer_running() can see that it
* is pending; nothing needs to happen when it actually expires.
*/
static void rose_ftimer_expiry(struct timer_list *t)
{
}
static void rose_t0timer_expiry(struct timer_list *t)
{
struct rose_neigh *neigh = from_timer(neigh, t, t0timer);
rose_transmit_restart_request(neigh);
neigh->dce_mode = 0;
rose_start_t0timer(neigh);
}
/*
* Interface to ax25_send_frame. Changes my level 2 callsign depending
* on whether we have a global ROSE callsign or use the default port
* callsign.
*/
static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
{
const ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (const ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
ax25s = neigh->ax25;
neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
if (ax25s)
ax25_cb_put(ax25s);
return neigh->ax25 != NULL;
}
/*
* Interface to ax25_link_up. Changes my level 2 callsign depending
* on whether we have a global ROSE callsign or use the default port
* callsign.
*/
static int rose_link_up(struct rose_neigh *neigh)
{
const ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (const ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
ax25s = neigh->ax25;
neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
if (ax25s)
ax25_cb_put(ax25s);
return neigh->ax25 != NULL;
}
/*
* This handles all restart and diagnostic frames.
*/
void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigned short frametype)
{
struct sk_buff *skbn;
switch (frametype) {
case ROSE_RESTART_REQUEST:
rose_stop_t0timer(neigh);
neigh->restarted = 1;
neigh->dce_mode = (skb->data[3] == ROSE_DTE_ORIGINATED);
rose_transmit_restart_confirmation(neigh);
break;
case ROSE_RESTART_CONFIRMATION:
rose_stop_t0timer(neigh);
neigh->restarted = 1;
break;
case ROSE_DIAGNOSTIC:
pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3],
skb->data + 4);
break;
default:
printk(KERN_WARNING "ROSE: received unknown %02X with LCI 000\n", frametype);
break;
}
if (neigh->restarted) {
while ((skbn = skb_dequeue(&neigh->queue)) != NULL)
if (!rose_send_frame(skbn, neigh))
kfree_skb(skbn);
}
}
/*
* This routine is called when a Restart Request is needed
*/
static void rose_transmit_restart_request(struct rose_neigh *neigh)
{
struct sk_buff *skb;
unsigned char *dptr;
int len;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3;
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
dptr = skb_put(skb, ROSE_MIN_LEN + 3);
*dptr++ = AX25_P_ROSE;
*dptr++ = ROSE_GFI;
*dptr++ = 0x00;
*dptr++ = ROSE_RESTART_REQUEST;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
if (!rose_send_frame(skb, neigh))
kfree_skb(skb);
}
/*
* This routine is called when a Restart Confirmation is needed
*/
static void rose_transmit_restart_confirmation(struct rose_neigh *neigh)
{
struct sk_buff *skb;
unsigned char *dptr;
int len;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
dptr = skb_put(skb, ROSE_MIN_LEN + 1);
*dptr++ = AX25_P_ROSE;
*dptr++ = ROSE_GFI;
*dptr++ = 0x00;
*dptr++ = ROSE_RESTART_CONFIRMATION;
if (!rose_send_frame(skb, neigh))
kfree_skb(skb);
}
/*
* This routine is called when a Clear Request is needed outside of the context
* of a connected socket.
*/
void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, unsigned char cause, unsigned char diagnostic)
{
struct sk_buff *skb;
unsigned char *dptr;
int len;
if (!neigh->dev)
return;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3;
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
dptr = skb_put(skb, ROSE_MIN_LEN + 3);
*dptr++ = AX25_P_ROSE;
*dptr++ = ((lci >> 8) & 0x0F) | ROSE_GFI;
*dptr++ = ((lci >> 0) & 0xFF);
*dptr++ = ROSE_CLEAR_REQUEST;
*dptr++ = cause;
*dptr++ = diagnostic;
if (!rose_send_frame(skb, neigh))
kfree_skb(skb);
}
void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh)
{
unsigned char *dptr;
if (neigh->loopback) {
rose_loopback_queue(skb, neigh);
return;
}
if (!rose_link_up(neigh))
neigh->restarted = 0;
dptr = skb_push(skb, 1);
*dptr++ = AX25_P_ROSE;
if (neigh->restarted) {
if (!rose_send_frame(skb, neigh))
kfree_skb(skb);
} else {
skb_queue_tail(&neigh->queue, skb);
if (!rose_t0timer_running(neigh)) {
rose_transmit_restart_request(neigh);
neigh->dce_mode = 0;
rose_start_t0timer(neigh);
}
}
}
| linux-master | net/rose/rose_link.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/gfp.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
static void rose_send_iframe(struct sock *sk, struct sk_buff *skb)
{
struct rose_sock *rose = rose_sk(sk);
if (skb == NULL)
return;
skb->data[2] |= (rose->vr << 5) & 0xE0;
skb->data[2] |= (rose->vs << 1) & 0x0E;
rose_start_idletimer(sk);
rose_transmit_link(skb, rose->neighbour);
}
void rose_kick(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb, *skbn;
unsigned short start, end;
if (rose->state != ROSE_STATE_3)
return;
if (rose->condition & ROSE_COND_PEER_RX_BUSY)
return;
if (!skb_peek(&sk->sk_write_queue))
return;
start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
end = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS;
if (start == end)
return;
rose->vs = start;
/*
* Transmit data until either we're out of data to send or
* the window is full.
*/
skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&sk->sk_write_queue, skb);
break;
}
skb_set_owner_w(skbn, sk);
/*
* Transmit the frame copy.
*/
rose_send_iframe(sk, skbn);
rose->vs = (rose->vs + 1) % ROSE_MODULUS;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&rose->ack_queue, skb);
} while (rose->vs != end &&
(skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
rose->vl = rose->vr;
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_stop_timer(sk);
}
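/*
* Worked example (editor's addition, not part of the original file):
* with va = 2, an empty ack queue and sysctl_rose_window_size = 3,
* start is 2 and end is (2 + 3) % 8 = 5, so at most three frames go
* out carrying ns = 2, 3 and 4 before the window closes.
*/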
/*
* The following routines are taken from page 170 of the 7th ARRL Computer
* Networking Conference paper, as is the whole state machine.
*/
void rose_enquiry_response(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
if (rose->condition & ROSE_COND_OWN_RX_BUSY)
rose_write_internal(sk, ROSE_RNR);
else
rose_write_internal(sk, ROSE_RR);
rose->vl = rose->vr;
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_stop_timer(sk);
}
| linux-master | net/rose/rose_out.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <[email protected]>
* Patrick Schaaf <[email protected]>
* Martin Josefsson <[email protected]>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]>
*/
/* Kernel module which implements the set match and SET target
* for netfilter/iptables.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <uapi/linux/netfilter/xt_set.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
MODULE_DESCRIPTION("Xtables: IP set match and target module");
MODULE_ALIAS("xt_SET");
MODULE_ALIAS("ipt_set");
MODULE_ALIAS("ip6t_set");
MODULE_ALIAS("ipt_SET");
MODULE_ALIAS("ip6t_SET");
static inline int
match_set(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, int inv)
{
if (ip_set_test(index, skb, par, opt))
inv = !inv;
return inv;
}
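/*
* Editor's note (not part of the original file): the helper above
* computes (test XOR inv). With inv == 0 it returns the raw lookup
* result; with inv == 1 the sense is flipped, which is how an
* iptables rule written with "! --match-set" matches exactly the
* packets that are not in the set.
*/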
#define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \
struct ip_set_adt_opt n = { \
.family = f, \
.dim = d, \
.flags = fs, \
.cmdflags = cfs, \
.ext.timeout = t, \
.ext.packets = p, \
.ext.bytes = b, \
.ext.packets_op = po, \
.ext.bytes_op = bo, \
}
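/*
 * Illustrative expansion: ADT_OPT(opt, NFPROTO_IPV4, 2, flags, 0,
 * UINT_MAX, 0, 0, 0, 0) declares a local "struct ip_set_adt_opt opt"
 * with family NFPROTO_IPV4, two dimensions, the given match flags, no
 * command flags, a timeout of UINT_MAX (treated as "no timeout" by the
 * ipset core, see the IPSET_NO_TIMEOUT checks below) and zeroed
 * counter extensions.
 */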
/* Revision 0 interface: backward compatible with netfilter/iptables */
static bool
set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v0 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim,
info->match_set.u.compat.flags, 0, UINT_MAX,
0, 0, 0, 0);
return match_set(info->match_set.index, skb, par, &opt,
info->match_set.u.compat.flags & IPSET_INV_MATCH);
}
static void
compat_flags(struct xt_set_info_v0 *info)
{
u_int8_t i;
/* Fill out compatibility data according to enum ip_set_kopt */
info->u.compat.dim = IPSET_DIM_ZERO;
if (info->u.flags[0] & IPSET_MATCH_INV)
info->u.compat.flags |= IPSET_INV_MATCH;
for (i = 0; i < IPSET_DIM_MAX - 1 && info->u.flags[i]; i++) {
info->u.compat.dim++;
if (info->u.flags[i] & IPSET_SRC)
info->u.compat.flags |= (1 << info->u.compat.dim);
}
}
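/*
 * Worked example (illustrative): with u.flags[0] = IPSET_SRC and
 * u.flags[1] set to any non-zero direction flag, the loop runs twice,
 * leaving u.compat.dim == 2 and setting bit 1 of u.compat.flags to
 * record that the first dimension matches on source.
 */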
static int
set_match_v0_checkentry(const struct xt_mtchk_param *par)
{
struct xt_set_info_match_v0 *info = par->matchinfo;
ip_set_id_t index;
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find set identified by id %u to match\n",
info->match_set.index);
return -ENOENT;
}
if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
pr_info_ratelimited("set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
/* Fill out compatibility data */
compat_flags(&info->match_set);
return 0;
}
static void
set_match_v0_destroy(const struct xt_mtdtor_param *par)
{
struct xt_set_info_match_v0 *info = par->matchinfo;
ip_set_nfnl_put(par->net, info->match_set.index);
}
/* Revision 1 match */
static bool
set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v1 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
info->match_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
if (opt.flags & IPSET_RETURN_NOMATCH)
opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
return match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH);
}
static int
set_match_v1_checkentry(const struct xt_mtchk_param *par)
{
struct xt_set_info_match_v1 *info = par->matchinfo;
ip_set_id_t index;
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find set identified by id %u to match\n",
info->match_set.index);
return -ENOENT;
}
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_info_ratelimited("set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
return 0;
}
static void
set_match_v1_destroy(const struct xt_mtdtor_param *par)
{
struct xt_set_info_match_v1 *info = par->matchinfo;
ip_set_nfnl_put(par->net, info->match_set.index);
}
/* Revision 3 match */
static bool
set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v3 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX,
info->packets.value, info->bytes.value,
info->packets.op, info->bytes.op);
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
return match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
/* Revision 4 match */
static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v4 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX,
info->packets.value, info->bytes.value,
info->packets.op, info->bytes.op);
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
return match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH);
}
#define set_match_v4_checkentry set_match_v1_checkentry
#define set_match_v4_destroy set_match_v1_destroy
/* Revision 0 interface: backward compatible with netfilter/iptables */
static unsigned int
set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v0 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.u.compat.dim,
info->add_set.u.compat.flags, 0, UINT_MAX,
0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.u.compat.dim,
info->del_set.u.compat.flags, 0, UINT_MAX,
0, 0, 0, 0);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index, skb, par, &del_opt);
return XT_CONTINUE;
}
static int
set_target_v0_checkentry(const struct xt_tgchk_param *par)
{
struct xt_set_info_target_v0 *info = par->targinfo;
ip_set_id_t index;
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find add_set index %u as target\n",
info->add_set.index);
return -ENOENT;
}
}
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
return -ERANGE;
}
/* Fill out compatibility data */
compat_flags(&info->add_set);
compat_flags(&info->del_set);
return 0;
}
static void
set_target_v0_destroy(const struct xt_tgdtor_param *par)
{
const struct xt_set_info_target_v0 *info = par->targinfo;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
}
/* Revision 1 target */
static unsigned int
set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v1 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
info->add_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
info->del_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index, skb, par, &del_opt);
return XT_CONTINUE;
}
static int
set_target_v1_checkentry(const struct xt_tgchk_param *par)
{
const struct xt_set_info_target_v1 *info = par->targinfo;
ip_set_id_t index;
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find add_set index %u as target\n",
info->add_set.index);
return -ENOENT;
}
}
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX) {
pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
return -ERANGE;
}
return 0;
}
static void
set_target_v1_destroy(const struct xt_tgdtor_param *par)
{
const struct xt_set_info_target_v1 *info = par->targinfo;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
}
/* Revision 2 target */
static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v2 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
info->add_set.flags, info->flags, info->timeout,
0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
info->del_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index, skb, par, &del_opt);
return XT_CONTINUE;
}
#define set_target_v2_checkentry set_target_v1_checkentry
#define set_target_v2_destroy set_target_v1_destroy
/* Revision 3 target */
#define MOPT(opt, member) ((opt).ext.skbinfo.member)
static unsigned int
set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
int ret;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
info->add_set.flags, info->flags, info->timeout,
0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
info->del_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
ADT_OPT(map_opt, xt_family(par), info->map_set.dim,
info->map_set.flags, 0, UINT_MAX,
0, 0, 0, 0);
/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index, skb, par, &del_opt);
if (info->map_set.index != IPSET_INVALID_ID) {
map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK |
IPSET_FLAG_MAP_SKBPRIO |
IPSET_FLAG_MAP_SKBQUEUE);
ret = match_set(info->map_set.index, skb, par, &map_opt,
info->map_set.flags & IPSET_INV_MATCH);
if (!ret)
return XT_CONTINUE;
if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK)
skb->mark = (skb->mark & ~MOPT(map_opt, skbmarkmask))
^ MOPT(map_opt, skbmark);
if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO)
skb->priority = MOPT(map_opt, skbprio);
if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) &&
skb->dev &&
skb->dev->real_num_tx_queues > MOPT(map_opt, skbqueue))
skb_set_queue_mapping(skb, MOPT(map_opt, skbqueue));
}
return XT_CONTINUE;
}
static int
set_target_v3_checkentry(const struct xt_tgchk_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
ip_set_id_t index;
int ret = 0;
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net,
info->add_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find add_set index %u as target\n",
info->add_set.index);
return -ENOENT;
}
}
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net,
info->del_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find del_set index %u as target\n",
info->del_set.index);
ret = -ENOENT;
goto cleanup_add;
}
}
if (info->map_set.index != IPSET_INVALID_ID) {
if (strncmp(par->table, "mangle", 7)) {
pr_info_ratelimited("--map-set only usable from mangle table\n");
ret = -EINVAL;
goto cleanup_del;
}
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
(info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
(par->hook_mask & ~(1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
ret = -EINVAL;
goto cleanup_del;
}
index = ip_set_nfnl_get_byindex(par->net,
info->map_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find map_set index %u as target\n",
info->map_set.index);
ret = -ENOENT;
goto cleanup_del;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX ||
info->map_set.dim > IPSET_DIM_MAX) {
pr_info_ratelimited("SET target dimension over the limit!\n");
ret = -ERANGE;
goto cleanup_mark;
}
return 0;
cleanup_mark:
if (info->map_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->map_set.index);
cleanup_del:
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
cleanup_add:
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return ret;
}
static void
set_target_v3_destroy(const struct xt_tgdtor_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
if (info->map_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->map_set.index);
}
static struct xt_match set_matches[] __read_mostly = {
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 0,
.match = set_match_v0,
.matchsize = sizeof(struct xt_set_info_match_v0),
.checkentry = set_match_v0_checkentry,
.destroy = set_match_v0_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 1,
.match = set_match_v1,
.matchsize = sizeof(struct xt_set_info_match_v1),
.checkentry = set_match_v1_checkentry,
.destroy = set_match_v1_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 1,
.match = set_match_v1,
.matchsize = sizeof(struct xt_set_info_match_v1),
.checkentry = set_match_v1_checkentry,
.destroy = set_match_v1_destroy,
.me = THIS_MODULE
},
/* --return-nomatch flag support */
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 2,
.match = set_match_v1,
.matchsize = sizeof(struct xt_set_info_match_v1),
.checkentry = set_match_v1_checkentry,
.destroy = set_match_v1_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 2,
.match = set_match_v1,
.matchsize = sizeof(struct xt_set_info_match_v1),
.checkentry = set_match_v1_checkentry,
.destroy = set_match_v1_destroy,
.me = THIS_MODULE
},
/* counters support: update, match */
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 3,
.match = set_match_v3,
.matchsize = sizeof(struct xt_set_info_match_v3),
.checkentry = set_match_v3_checkentry,
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 3,
.match = set_match_v3,
.matchsize = sizeof(struct xt_set_info_match_v3),
.checkentry = set_match_v3_checkentry,
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
/* new revision for counters support: update, match */
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 4,
.match = set_match_v4,
.matchsize = sizeof(struct xt_set_info_match_v4),
.checkentry = set_match_v4_checkentry,
.destroy = set_match_v4_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 4,
.match = set_match_v4,
.matchsize = sizeof(struct xt_set_info_match_v4),
.checkentry = set_match_v4_checkentry,
.destroy = set_match_v4_destroy,
.me = THIS_MODULE
},
};
static struct xt_target set_targets[] __read_mostly = {
{
.name = "SET",
.revision = 0,
.family = NFPROTO_IPV4,
.target = set_target_v0,
.targetsize = sizeof(struct xt_set_info_target_v0),
.checkentry = set_target_v0_checkentry,
.destroy = set_target_v0_destroy,
.me = THIS_MODULE
},
{
.name = "SET",
.revision = 1,
.family = NFPROTO_IPV4,
.target = set_target_v1,
.targetsize = sizeof(struct xt_set_info_target_v1),
.checkentry = set_target_v1_checkentry,
.destroy = set_target_v1_destroy,
.me = THIS_MODULE
},
{
.name = "SET",
.revision = 1,
.family = NFPROTO_IPV6,
.target = set_target_v1,
.targetsize = sizeof(struct xt_set_info_target_v1),
.checkentry = set_target_v1_checkentry,
.destroy = set_target_v1_destroy,
.me = THIS_MODULE
},
/* --timeout and --exist flags support */
{
.name = "SET",
.revision = 2,
.family = NFPROTO_IPV4,
.target = set_target_v2,
.targetsize = sizeof(struct xt_set_info_target_v2),
.checkentry = set_target_v2_checkentry,
.destroy = set_target_v2_destroy,
.me = THIS_MODULE
},
{
.name = "SET",
.revision = 2,
.family = NFPROTO_IPV6,
.target = set_target_v2,
.targetsize = sizeof(struct xt_set_info_target_v2),
.checkentry = set_target_v2_checkentry,
.destroy = set_target_v2_destroy,
.me = THIS_MODULE
},
/* --map-set support */
{
.name = "SET",
.revision = 3,
.family = NFPROTO_IPV4,
.target = set_target_v3,
.targetsize = sizeof(struct xt_set_info_target_v3),
.checkentry = set_target_v3_checkentry,
.destroy = set_target_v3_destroy,
.me = THIS_MODULE
},
{
.name = "SET",
.revision = 3,
.family = NFPROTO_IPV6,
.target = set_target_v3,
.targetsize = sizeof(struct xt_set_info_target_v3),
.checkentry = set_target_v3_checkentry,
.destroy = set_target_v3_destroy,
.me = THIS_MODULE
},
};
static int __init xt_set_init(void)
{
int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
if (!ret) {
ret = xt_register_targets(set_targets,
ARRAY_SIZE(set_targets));
if (ret)
xt_unregister_matches(set_matches,
ARRAY_SIZE(set_matches));
}
return ret;
}
static void __exit xt_set_fini(void)
{
xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
}
module_init(xt_set_init);
module_exit(xt_set_fini);
| linux-master | net/netfilter/xt_set.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
#include <net/neighbour.h>
#include <net/ip.h>
struct nft_fwd_netdev {
u8 sreg_dev;
};
static void nft_fwd_netdev_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
int oif = regs->data[priv->sreg_dev];
struct sk_buff *skb = pkt->skb;
/* This is used by ifb only. */
skb->skb_iif = skb->dev->ifindex;
skb_set_redirected(skb, nft_hook(pkt) == NF_NETDEV_INGRESS);
nf_fwd_netdev_egress(pkt, oif);
regs->verdict.code = NF_STOLEN;
}
static const struct nla_policy nft_fwd_netdev_policy[NFTA_FWD_MAX + 1] = {
[NFTA_FWD_SREG_DEV] = { .type = NLA_U32 },
[NFTA_FWD_SREG_ADDR] = { .type = NLA_U32 },
[NFTA_FWD_NFPROTO] = NLA_POLICY_MAX(NLA_BE32, 255),
};
static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
if (tb[NFTA_FWD_SREG_DEV] == NULL)
return -EINVAL;
return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
sizeof(int));
}
static int nft_fwd_netdev_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_FWD_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_fwd_netdev *priv = nft_expr_priv(expr);
int oif = ctx->regs[priv->sreg_dev].data.data[0];
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
}
static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
{
return true;
}
struct nft_fwd_neigh {
u8 sreg_dev;
u8 sreg_addr;
u8 nfproto;
};
static void nft_fwd_neigh_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_fwd_neigh *priv = nft_expr_priv(expr);
void *addr = &regs->data[priv->sreg_addr];
int oif = regs->data[priv->sreg_dev];
unsigned int verdict = NF_STOLEN;
struct sk_buff *skb = pkt->skb;
struct net_device *dev;
int neigh_table;
switch (priv->nfproto) {
case NFPROTO_IPV4: {
struct iphdr *iph;
if (skb->protocol != htons(ETH_P_IP)) {
verdict = NFT_BREAK;
goto out;
}
if (skb_try_make_writable(skb, sizeof(*iph))) {
verdict = NF_DROP;
goto out;
}
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
neigh_table = NEIGH_ARP_TABLE;
break;
}
case NFPROTO_IPV6: {
struct ipv6hdr *ip6h;
if (skb->protocol != htons(ETH_P_IPV6)) {
verdict = NFT_BREAK;
goto out;
}
if (skb_try_make_writable(skb, sizeof(*ip6h))) {
verdict = NF_DROP;
goto out;
}
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
neigh_table = NEIGH_ND_TABLE;
break;
}
default:
verdict = NFT_BREAK;
goto out;
}
dev = dev_get_by_index_rcu(nft_net(pkt), oif);
if (dev == NULL)
return;
skb->dev = dev;
skb_clear_tstamp(skb);
neigh_xmit(neigh_table, dev, addr, skb);
out:
regs->verdict.code = verdict;
}
static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_fwd_neigh *priv = nft_expr_priv(expr);
unsigned int addr_len;
int err;
if (!tb[NFTA_FWD_SREG_DEV] ||
!tb[NFTA_FWD_SREG_ADDR] ||
!tb[NFTA_FWD_NFPROTO])
return -EINVAL;
priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO]));
switch (priv->nfproto) {
case NFPROTO_IPV4:
addr_len = sizeof(struct in_addr);
break;
case NFPROTO_IPV6:
addr_len = sizeof(struct in6_addr);
break;
default:
return -EOPNOTSUPP;
}
err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
sizeof(int));
if (err < 0)
return err;
return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
addr_len);
}
static int nft_fwd_neigh_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_fwd_neigh *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_FWD_SREG_DEV, priv->sreg_dev) ||
nft_dump_register(skb, NFTA_FWD_SREG_ADDR, priv->sreg_addr) ||
nla_put_be32(skb, NFTA_FWD_NFPROTO, htonl(priv->nfproto)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_fwd_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) |
(1 << NF_NETDEV_EGRESS));
}
static struct nft_expr_type nft_fwd_netdev_type;
static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
.type = &nft_fwd_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fwd_neigh)),
.eval = nft_fwd_neigh_eval,
.init = nft_fwd_neigh_init,
.dump = nft_fwd_neigh_dump,
.validate = nft_fwd_validate,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops nft_fwd_netdev_ops = {
.type = &nft_fwd_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fwd_netdev)),
.eval = nft_fwd_netdev_eval,
.init = nft_fwd_netdev_init,
.dump = nft_fwd_netdev_dump,
.validate = nft_fwd_validate,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_fwd_netdev_offload,
.offload_action = nft_fwd_netdev_offload_action,
};
static const struct nft_expr_ops *
nft_fwd_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_FWD_SREG_ADDR])
return &nft_fwd_neigh_netdev_ops;
if (tb[NFTA_FWD_SREG_DEV])
return &nft_fwd_netdev_ops;
return ERR_PTR(-EOPNOTSUPP);
}
static struct nft_expr_type nft_fwd_netdev_type __read_mostly = {
.family = NFPROTO_NETDEV,
.name = "fwd",
.select_ops = nft_fwd_select_ops,
.policy = nft_fwd_netdev_policy,
.maxattr = NFTA_FWD_MAX,
.owner = THIS_MODULE,
};
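/*
 * Usage sketch (illustrative; exact syntax depends on the nft
 * userspace version):
 *   nft add rule netdev t c fwd to "eth1"
 * loads the interface index into a register and is handled by
 * nft_fwd_netdev_eval(), while a form such as
 *   nft add rule netdev t c fwd ip to 192.168.2.200 device "eth1"
 * supplies NFTA_FWD_SREG_ADDR and so selects the neighbour variant in
 * nft_fwd_select_ops() below.
 */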
static int __init nft_fwd_netdev_module_init(void)
{
return nft_register_expr(&nft_fwd_netdev_type);
}
static void __exit nft_fwd_netdev_module_exit(void)
{
nft_unregister_expr(&nft_fwd_netdev_type);
}
module_init(nft_fwd_netdev_module_init);
module_exit(nft_fwd_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(5, "fwd");
| linux-master | net/netfilter/nft_fwd_netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IP tables module for matching the value of the TTL
* (C) 2000,2001 by Harald Welte <[email protected]>
*
* Hop Limit matching module
* (C) 2001-2002 Maciej Soltysiak <[email protected]>
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_ttl.h>
#include <linux/netfilter_ipv6/ip6t_hl.h>
MODULE_AUTHOR("Maciej Soltysiak <[email protected]>");
MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_ttl");
MODULE_ALIAS("ip6t_hl");
static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ipt_ttl_info *info = par->matchinfo;
const u8 ttl = ip_hdr(skb)->ttl;
switch (info->mode) {
case IPT_TTL_EQ:
return ttl == info->ttl;
case IPT_TTL_NE:
return ttl != info->ttl;
case IPT_TTL_LT:
return ttl < info->ttl;
case IPT_TTL_GT:
return ttl > info->ttl;
}
return false;
}
static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ip6t_hl_info *info = par->matchinfo;
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
switch (info->mode) {
case IP6T_HL_EQ:
return ip6h->hop_limit == info->hop_limit;
case IP6T_HL_NE:
return ip6h->hop_limit != info->hop_limit;
case IP6T_HL_LT:
return ip6h->hop_limit < info->hop_limit;
case IP6T_HL_GT:
return ip6h->hop_limit > info->hop_limit;
}
return false;
}
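/*
 * Usage sketch (illustrative; option names come from the libxt_ttl and
 * libxt_hl userspace extensions): "iptables -m ttl --ttl-eq 64"
 * exercises ttl_mt(), and "ip6tables -m hl --hl-lt 5" exercises
 * hl_mt6().
 */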
static struct xt_match hl_mt_reg[] __read_mostly = {
{
.name = "ttl",
.revision = 0,
.family = NFPROTO_IPV4,
.match = ttl_mt,
.matchsize = sizeof(struct ipt_ttl_info),
.me = THIS_MODULE,
},
{
.name = "hl",
.revision = 0,
.family = NFPROTO_IPV6,
.match = hl_mt6,
.matchsize = sizeof(struct ip6t_hl_info),
.me = THIS_MODULE,
},
};
static int __init hl_mt_init(void)
{
return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
}
static void __exit hl_mt_exit(void)
{
xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
}
module_init(hl_mt_init);
module_exit(hl_mt_exit);
| linux-master | net/netfilter/xt_HL.c |
/*
* xt_time
* Copyright © CC Computer Consultants GmbH, 2007
*
* based on ipt_time by Fabrice MARIE <[email protected]>
* This is a module which is used for time matching
* It is using some modified code from dietlibc (localtime() function)
* that you can find at https://www.fefe.de/dietlibc/
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from gnu.org/gpl.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_time.h>
struct xtm {
u_int8_t month; /* (1-12) */
u_int8_t monthday; /* (1-31) */
u_int8_t weekday; /* (1-7) */
u_int8_t hour; /* (0-23) */
u_int8_t minute; /* (0-59) */
u_int8_t second; /* (0-59) */
unsigned int dse;
};
extern struct timezone sys_tz; /* ouch */
static const u_int16_t days_since_year[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
};
static const u_int16_t days_since_leapyear[] = {
0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335,
};
/*
* Since time progresses forward, it is best to organize this array in reverse,
* to minimize lookup time.
*/
enum {
DSE_FIRST = 2039,
SECONDS_PER_DAY = 86400,
};
static const u_int16_t days_since_epoch[] = {
/* 2039 - 2030 */
25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915,
/* 2029 - 2020 */
21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262,
/* 2019 - 2010 */
17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610,
/* 2009 - 2000 */
14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957,
/* 1999 - 1990 */
10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305,
/* 1989 - 1980 */
6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652,
/* 1979 - 1970 */
3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0,
};
static inline bool is_leap(unsigned int y)
{
return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
}
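/*
 * Examples: is_leap(2024) is true (divisible by 4, not by 100),
 * is_leap(2000) is true (divisible by 400), is_leap(1900) is false
 * (divisible by 100 but not by 400).
 */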
/*
* Each network packet has a (nano)seconds-since-the-epoch (SSTE) timestamp.
* Since we match against days and daytime, the SSTE value needs to be
* computed back into human-readable dates.
*
* This is done in three separate functions so that the most expensive
* calculations are done last, in case a "simple match" can be found earlier.
*/
static inline unsigned int localtime_1(struct xtm *r, time64_t time)
{
unsigned int v, w;
/* Each day has 86400s, so finding the hour/minute is actually easy. */
div_u64_rem(time, SECONDS_PER_DAY, &v);
r->second = v % 60;
w = v / 60;
r->minute = w % 60;
r->hour = w / 60;
return v;
}
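/*
 * Worked example: for a stamp 45296 seconds into a day, v = 45296,
 * so second = 45296 % 60 = 56, w = 754, minute = 754 % 60 = 34 and
 * hour = 754 / 60 = 12, i.e. 12:34:56.
 */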
static inline void localtime_2(struct xtm *r, time64_t time)
{
/*
* Here comes the rest (weekday, monthday). First, divide the SSTE
* by seconds-per-day to get the number of _days_ since the epoch.
*/
r->dse = div_u64(time, SECONDS_PER_DAY);
/*
* 1970-01-01 (w=0) was a Thursday (4).
* -1 and +1 map Sunday properly onto 7.
*/
r->weekday = (4 + r->dse - 1) % 7 + 1;
}
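/*
 * Worked examples: dse = 0 (1970-01-01) gives (4 + 0 - 1) % 7 + 1 = 4,
 * a Thursday; dse = 3 (1970-01-04) gives (4 + 3 - 1) % 7 + 1 = 7, a
 * Sunday.
 */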
static void localtime_3(struct xtm *r, time64_t time)
{
unsigned int year, i, w = r->dse;
/*
* In each year, a certain number of days-since-the-epoch have passed.
* Find the year that is closest to said days.
*
* Consider, for example, w=21612 (2029-03-04). Loop will abort on
* dse[i] <= w, which happens when dse[i] == 21550. This implies
* year == 2029. w will then be 62.
*/
for (i = 0, year = DSE_FIRST; days_since_epoch[i] > w;
++i, --year)
/* just loop */;
w -= days_since_epoch[i];
/*
* By now we have the current year, and the day of the year.
* r->yearday = w;
*
* On to finding the month (like above). In each month, a certain
* number of days-since-New Year have passed, and find the closest
* one.
*
* Consider w=62 (in a non-leap year). Loop will abort on
* dsy[i] <= w, which happens when dsy[i] == 31+28 (i == 2).
* Concludes i == 2, i.e. 3rd month => March.
*
* (A different approach to use would be to subtract a monthlength
* from w repeatedly while counting.)
*/
if (is_leap(year)) {
/* use days_since_leapyear[] in a leap year */
for (i = ARRAY_SIZE(days_since_leapyear) - 1;
i > 0 && days_since_leapyear[i] > w; --i)
/* just loop */;
r->monthday = w - days_since_leapyear[i] + 1;
} else {
for (i = ARRAY_SIZE(days_since_year) - 1;
i > 0 && days_since_year[i] > w; --i)
/* just loop */;
r->monthday = w - days_since_year[i] + 1;
}
r->month = i + 1;
}
static bool
time_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_time_info *info = par->matchinfo;
unsigned int packet_time;
struct xtm current_time;
time64_t stamp;
/*
* We need real time here, but we can neither use skb->tstamp
* nor __net_timestamp().
*
* skb->tstamp and skb->skb_mstamp_ns overlap, however, they
* use different clock types (real vs monotonic).
*
* Suppose you have two rules:
* 1. match before 13:00
* 2. match after 13:00
*
* If you match against processing time (ktime_get_real_seconds) it
* may happen that the same packet matches both rules if
* it arrived at the right moment before 13:00, so it would be
* better to check skb->tstamp and set it via __net_timestamp()
* if needed. This however breaks outgoing packets tx timestamp,
* and causes them to get delayed forever by fq packet scheduler.
*/
stamp = ktime_get_real_seconds();
if (info->flags & XT_TIME_LOCAL_TZ)
/* Adjust for local timezone */
stamp -= 60 * sys_tz.tz_minuteswest;
/*
* xt_time will match when _all_ of the following hold:
* - 'now' is in the global time range date_start..date_end
* - 'now' is in the monthday mask
* - 'now' is in the weekday mask
* - 'now' is in the daytime range time_start..time_end
* (and by default, libxt_time will set these so as to match)
*
* note: info->date_start/stop are unsigned 32-bit values that
* can hold values beyond y2038, but not after y2106.
*/
if (stamp < info->date_start || stamp > info->date_stop)
return false;
packet_time = localtime_1(&current_time, stamp);
if (info->daytime_start < info->daytime_stop) {
if (packet_time < info->daytime_start ||
packet_time > info->daytime_stop)
return false;
} else {
if (packet_time < info->daytime_start &&
packet_time > info->daytime_stop)
return false;
/* If the user asked to ignore 'next day', then e.g.
 * '1 PM Wed, August 1st' should be treated
 * like 'Tue 1 PM July 31st'.
 *
 * This also causes 'Monday, 23:00 to 01:00' to match for
 * 2 hours, starting Monday 23:00 and ending Tuesday 01:00.
 */
if ((info->flags & XT_TIME_CONTIGUOUS) &&
packet_time <= info->daytime_stop)
stamp -= SECONDS_PER_DAY;
}
localtime_2(&current_time, stamp);
if (!(info->weekdays_match & (1 << current_time.weekday)))
return false;
/* Do not spend time computing monthday if all days match anyway */
if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) {
localtime_3(&current_time, stamp);
if (!(info->monthdays_match & (1 << current_time.monthday)))
return false;
}
return true;
}
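/*
 * Usage sketch (illustrative; option names come from the libxt_time
 * extension): a rule such as
 *   iptables -A INPUT -m time --timestart 23:00 --timestop 01:00
 *            --weekdays Mo --contiguous -j ACCEPT
 * matches from Monday 23:00 through Tuesday 01:00; without
 * --contiguous the same start/stop pair matches Monday 00:00-01:00
 * and Monday 23:00-24:00 instead.
 */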
static int time_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_time_info *info = par->matchinfo;
if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
info->daytime_stop > XT_TIME_MAX_DAYTIME) {
pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
return -EDOM;
}
if (info->flags & ~XT_TIME_ALL_FLAGS) {
pr_info_ratelimited("unknown flags 0x%x\n",
info->flags & ~XT_TIME_ALL_FLAGS);
return -EINVAL;
}
if ((info->flags & XT_TIME_CONTIGUOUS) &&
info->daytime_start < info->daytime_stop)
return -EINVAL;
return 0;
}
static struct xt_match xt_time_mt_reg __read_mostly = {
.name = "time",
.family = NFPROTO_UNSPEC,
.match = time_mt,
.checkentry = time_mt_check,
.matchsize = sizeof(struct xt_time_info),
.me = THIS_MODULE,
};
static int __init time_mt_init(void)
{
int minutes = sys_tz.tz_minuteswest;
if (minutes < 0) /* east of Greenwich */
pr_info("kernel timezone is +%02d%02d\n",
-minutes / 60, -minutes % 60);
else /* west of Greenwich */
pr_info("kernel timezone is -%02d%02d\n",
minutes / 60, minutes % 60);
return xt_register_match(&xt_time_mt_reg);
}
static void __exit time_mt_exit(void)
{
xt_unregister_match(&xt_time_mt_reg);
}
module_init(time_mt_init);
module_exit(time_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: time-based matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_time");
MODULE_ALIAS("ip6t_time");
| linux-master | net/netfilter/xt_time.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Masquerade. Simple mapping which alters range to a local IP address
(depending on route). */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_masquerade.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
/* FIXME: Multiple targets. --RR */
static int masquerade_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
if (mr->rangesize != 1) {
pr_debug("bad rangesize %u\n", mr->rangesize);
return -EINVAL;
}
return nf_ct_netns_get(par->net, par->family);
}
static unsigned int
masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_nat_range2 range;
const struct nf_nat_ipv4_multi_range_compat *mr;
mr = par->targinfo;
range.flags = mr->range[0].flags;
range.min_proto = mr->range[0].min;
range.max_proto = mr->range[0].max;
return nf_nat_masquerade_ipv4(skb, xt_hooknum(par), &range,
xt_out(par));
}
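/*
 * Usage sketch (illustrative): the classic
 *   iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 * lands here; the target just forwards the single compat range to
 * nf_nat_masquerade_ipv4(), which picks the outgoing interface's
 * address as the source at translation time.
 */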
static void masquerade_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
#if IS_ENABLED(CONFIG_IPV6)
static unsigned int
masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
return nf_nat_masquerade_ipv6(skb, par->targinfo, xt_out(par));
}
static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
if (range->flags & NF_NAT_RANGE_MAP_IPS)
return -EINVAL;
return nf_ct_netns_get(par->net, par->family);
}
#endif
static struct xt_target masquerade_tg_reg[] __read_mostly = {
{
#if IS_ENABLED(CONFIG_IPV6)
.name = "MASQUERADE",
.family = NFPROTO_IPV6,
.target = masquerade_tg6,
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = 1 << NF_INET_POST_ROUTING,
.checkentry = masquerade_tg6_checkentry,
.destroy = masquerade_tg_destroy,
.me = THIS_MODULE,
}, {
#endif
.name = "MASQUERADE",
.family = NFPROTO_IPV4,
.target = masquerade_tg,
.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = 1 << NF_INET_POST_ROUTING,
.checkentry = masquerade_tg_check,
.destroy = masquerade_tg_destroy,
.me = THIS_MODULE,
}
};
static int __init masquerade_tg_init(void)
{
int ret;
ret = xt_register_targets(masquerade_tg_reg,
ARRAY_SIZE(masquerade_tg_reg));
if (ret)
return ret;
ret = nf_nat_masquerade_inet_register_notifiers();
if (ret) {
xt_unregister_targets(masquerade_tg_reg,
ARRAY_SIZE(masquerade_tg_reg));
return ret;
}
return ret;
}
static void __exit masquerade_tg_exit(void)
{
xt_unregister_targets(masquerade_tg_reg, ARRAY_SIZE(masquerade_tg_reg));
nf_nat_masquerade_inet_unregister_notifiers();
}
module_init(masquerade_tg_init);
module_exit(masquerade_tg_exit);
#if IS_ENABLED(CONFIG_IPV6)
MODULE_ALIAS("ip6t_MASQUERADE");
#endif
MODULE_ALIAS("ipt_MASQUERADE");
| linux-master | net/netfilter/xt_MASQUERADE.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/net/netfilter/xt_IDLETIMER.c
*
* Netfilter module to trigger a timer when packet matches.
* After timer expires a kevent will be sent.
*
* Copyright (C) 2004, 2010 Nokia Corporation
* Written by Timo Teras <[email protected]>
*
* Converted to x_tables and reworked for upstream inclusion
* by Luciano Coelho <[email protected]>
*
* Contact: Luciano Coelho <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_IDLETIMER.h>
#include <linux/kdev_t.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
struct idletimer_tg {
struct list_head entry;
struct alarm alarm;
struct timer_list timer;
struct work_struct work;
struct kobject *kobj;
struct device_attribute attr;
unsigned int refcnt;
u8 timer_type;
};
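/*
 * Lifecycle sketch (illustrative): a rule such as
 *   iptables -A OUTPUT -o wlan0 -j IDLETIMER --timeout 60 --label wlan0
 * creates one idletimer_tg keyed by its label; every matching packet
 * re-arms the timer, and expiry notifies userspace through the
 * per-label sysfs file (typically
 * /sys/class/xt_idletimer/timers/<label>).
 */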
static LIST_HEAD(idletimer_tg_list);
static DEFINE_MUTEX(list_mutex);
static struct kobject *idletimer_tg_kobj;
static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
struct idletimer_tg *entry;
list_for_each_entry(entry, &idletimer_tg_list, entry) {
if (!strcmp(label, entry->attr.attr.name))
return entry;
}
return NULL;
}
static ssize_t idletimer_tg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idletimer_tg *timer;
unsigned long expires = 0;
struct timespec64 ktimespec = {};
long time_diff = 0;
mutex_lock(&list_mutex);
timer = __idletimer_tg_find_by_label(attr->attr.name);
if (timer) {
if (timer->timer_type & XT_IDLETIMER_ALARM) {
ktime_t expires_alarm = alarm_expires_remaining(&timer->alarm);
ktimespec = ktime_to_timespec64(expires_alarm);
time_diff = ktimespec.tv_sec;
} else {
expires = timer->timer.expires;
time_diff = jiffies_to_msecs(expires - jiffies) / 1000;
}
}
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
return sysfs_emit(buf, "%ld\n", time_diff);
return sysfs_emit(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
{
struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
work);
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
}
static void idletimer_tg_expired(struct timer_list *t)
{
struct idletimer_tg *timer = from_timer(timer, t, timer);
pr_debug("timer %s expired\n", timer->attr.attr.name);
schedule_work(&timer->work);
}
static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
ktime_t now)
{
struct idletimer_tg *timer = alarm->data;
pr_debug("alarm %s expired\n", timer->attr.attr.name);
schedule_work(&timer->work);
return ALARMTIMER_NORESTART;
}
static int idletimer_check_sysfs_name(const char *name, unsigned int size)
{
int ret;
ret = xt_check_proc_name(name, size);
if (ret < 0)
return ret;
if (!strcmp(name, "power") ||
!strcmp(name, "subsystem") ||
!strcmp(name, "uevent"))
return -EINVAL;
return 0;
}
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
}
ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
if (ret < 0)
goto out_free_timer;
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
ret = -ENOMEM;
goto out_free_timer;
}
info->timer->attr.attr.mode = 0444;
info->timer->attr.show = idletimer_tg_show;
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
pr_debug("couldn't add file to sysfs");
goto out_free_attr;
}
list_add(&info->timer->entry, &idletimer_tg_list);
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
info->timer->refcnt = 1;
INIT_WORK(&info->timer->work, idletimer_tg_work);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
return 0;
out_free_attr:
kfree(info->timer->attr.attr.name);
out_free_timer:
kfree(info->timer);
out:
return ret;
}
static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
{
int ret;
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
}
ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
if (ret < 0)
goto out_free_timer;
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
ret = -ENOMEM;
goto out_free_timer;
}
info->timer->attr.attr.mode = 0444;
info->timer->attr.show = idletimer_tg_show;
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
pr_debug("couldn't add file to sysfs");
goto out_free_attr;
}
/* notify userspace */
kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
list_add(&info->timer->entry, &idletimer_tg_list);
pr_debug("timer type value is %u", info->timer_type);
info->timer->timer_type = info->timer_type;
info->timer->refcnt = 1;
INIT_WORK(&info->timer->work, idletimer_tg_work);
if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
ktime_t tout;
alarm_init(&info->timer->alarm, ALARM_BOOTTIME,
idletimer_tg_alarmproc);
info->timer->alarm.data = info->timer;
tout = ktime_set(info->timeout, 0);
alarm_start_relative(&info->timer->alarm, tout);
} else {
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
}
return 0;
out_free_attr:
kfree(info->timer->attr.attr.name);
out_free_timer:
kfree(info->timer);
out:
return ret;
}
/*
* The actual xt_tables plugin.
*/
static unsigned int idletimer_tg_target(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
return XT_CONTINUE;
}
/*
* The actual xt_tables plugin.
*/
static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info_v1 *info = par->targinfo;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
ktime_t tout = ktime_set(info->timeout, 0);
alarm_start_relative(&info->timer->alarm, tout);
} else {
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
}
return XT_CONTINUE;
}
static int idletimer_tg_helper(struct idletimer_tg_info *info)
{
if (info->timeout == 0) {
pr_debug("timeout value is zero\n");
return -EINVAL;
}
if (info->timeout >= INT_MAX / 1000) {
pr_debug("timeout value is too big\n");
return -EINVAL;
}
if (info->label[0] == '\0' ||
strnlen(info->label,
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
pr_debug("label is empty or not nul-terminated\n");
return -EINVAL;
}
return 0;
}
static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
{
struct idletimer_tg_info *info = par->targinfo;
int ret;
pr_debug("checkentry targinfo%s\n", info->label);
ret = idletimer_tg_helper(info);
if (ret < 0) {
pr_debug("checkentry helper returned an error\n");
return -EINVAL;
}
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
info->timer->refcnt++;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
} else {
ret = idletimer_tg_create(info);
if (ret < 0) {
pr_debug("failed to create timer\n");
mutex_unlock(&list_mutex);
return ret;
}
}
mutex_unlock(&list_mutex);
return 0;
}
static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
{
struct idletimer_tg_info_v1 *info = par->targinfo;
int ret;
pr_debug("checkentry targinfo%s\n", info->label);
if (info->send_nl_msg)
return -EOPNOTSUPP;
ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
if (ret < 0) {
pr_debug("checkentry helper returned an error\n");
return -EINVAL;
}
if (info->timer_type > XT_IDLETIMER_ALARM) {
pr_debug("invalid value for timer type\n");
return -EINVAL;
}
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
if (info->timer->timer_type != info->timer_type) {
pr_debug("Adding/Replacing rule with same label and different timer type is not allowed\n");
mutex_unlock(&list_mutex);
return -EINVAL;
}
info->timer->refcnt++;
if (info->timer_type & XT_IDLETIMER_ALARM) {
/* calculate remaining expiry time */
ktime_t tout = alarm_expires_remaining(&info->timer->alarm);
struct timespec64 ktimespec = ktime_to_timespec64(tout);
if (ktimespec.tv_sec > 0) {
pr_debug("time_expiry_remaining %lld\n",
ktimespec.tv_sec);
alarm_start_relative(&info->timer->alarm, tout);
}
} else {
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
}
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
} else {
ret = idletimer_tg_create_v1(info);
if (ret < 0) {
pr_debug("failed to create timer\n");
mutex_unlock(&list_mutex);
return ret;
}
}
mutex_unlock(&list_mutex);
return 0;
}
static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
pr_debug("destroy targinfo %s\n", info->label);
mutex_lock(&list_mutex);
if (--info->timer->refcnt == 0) {
pr_debug("deleting timer %s\n", info->label);
list_del(&info->timer->entry);
timer_shutdown_sync(&info->timer->timer);
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
} else {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
}
mutex_unlock(&list_mutex);
}
static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
{
const struct idletimer_tg_info_v1 *info = par->targinfo;
pr_debug("destroy targinfo %s\n", info->label);
mutex_lock(&list_mutex);
if (--info->timer->refcnt == 0) {
pr_debug("deleting timer %s\n", info->label);
list_del(&info->timer->entry);
if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
alarm_cancel(&info->timer->alarm);
} else {
timer_shutdown_sync(&info->timer->timer);
}
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
} else {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
}
mutex_unlock(&list_mutex);
}
static struct xt_target idletimer_tg[] __read_mostly = {
{
.name = "IDLETIMER",
.family = NFPROTO_UNSPEC,
.target = idletimer_tg_target,
.targetsize = sizeof(struct idletimer_tg_info),
.usersize = offsetof(struct idletimer_tg_info, timer),
.checkentry = idletimer_tg_checkentry,
.destroy = idletimer_tg_destroy,
.me = THIS_MODULE,
},
{
.name = "IDLETIMER",
.family = NFPROTO_UNSPEC,
.revision = 1,
.target = idletimer_tg_target_v1,
.targetsize = sizeof(struct idletimer_tg_info_v1),
.usersize = offsetof(struct idletimer_tg_info_v1, timer),
.checkentry = idletimer_tg_checkentry_v1,
.destroy = idletimer_tg_destroy_v1,
.me = THIS_MODULE,
},
};
static struct class *idletimer_tg_class;
static struct device *idletimer_tg_device;
static int __init idletimer_tg_init(void)
{
int err;
idletimer_tg_class = class_create("xt_idletimer");
err = PTR_ERR(idletimer_tg_class);
if (IS_ERR(idletimer_tg_class)) {
pr_debug("couldn't register device class\n");
goto out;
}
idletimer_tg_device = device_create(idletimer_tg_class, NULL,
MKDEV(0, 0), NULL, "timers");
err = PTR_ERR(idletimer_tg_device);
if (IS_ERR(idletimer_tg_device)) {
pr_debug("couldn't register system device\n");
goto out_class;
}
idletimer_tg_kobj = &idletimer_tg_device->kobj;
err = xt_register_targets(idletimer_tg, ARRAY_SIZE(idletimer_tg));
if (err < 0) {
pr_debug("couldn't register xt target\n");
goto out_dev;
}
return 0;
out_dev:
device_destroy(idletimer_tg_class, MKDEV(0, 0));
out_class:
class_destroy(idletimer_tg_class);
out:
return err;
}
static void __exit idletimer_tg_exit(void)
{
xt_unregister_targets(idletimer_tg, ARRAY_SIZE(idletimer_tg));
device_destroy(idletimer_tg_class, MKDEV(0, 0));
class_destroy(idletimer_tg_class);
}
module_init(idletimer_tg_init);
module_exit(idletimer_tg_exit);
MODULE_AUTHOR("Timo Teras <[email protected]>");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("ipt_IDLETIMER");
MODULE_ALIAS("ip6t_IDLETIMER");
| linux-master | net/netfilter/xt_IDLETIMER.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
struct nft_range_expr {
struct nft_data data_from;
struct nft_data data_to;
u8 sreg;
u8 len;
enum nft_range_ops op:8;
};
void nft_range_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
int d1, d2;
d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
if (d1 < 0 || d2 > 0)
regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
if (d1 >= 0 && d2 <= 0)
regs->verdict.code = NFT_BREAK;
break;
}
}
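/*
 * Worked example (illustrative): with len = 1, data_from = 10 and
 * data_to = 20, a register value of 15 gives d1 > 0 and d2 < 0, so an
 * NFT_RANGE_EQ expression matches and rule evaluation continues; a
 * value of 25 gives d2 > 0 and the verdict becomes NFT_BREAK. The
 * byte-wise memcmp() only behaves like a numeric comparison when the
 * operands are stored most-significant-byte first, as is typically the
 * case for the network-byte-order data loaded here.
 */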
static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
[NFTA_RANGE_SREG] = { .type = NLA_U32 },
[NFTA_RANGE_OP] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_RANGE_FROM_DATA] = { .type = NLA_NESTED },
[NFTA_RANGE_TO_DATA] = { .type = NLA_NESTED },
};
static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->data_from),
};
struct nft_data_desc desc_to = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->data_to),
};
int err;
u32 op;
if (!tb[NFTA_RANGE_SREG] ||
!tb[NFTA_RANGE_OP] ||
!tb[NFTA_RANGE_FROM_DATA] ||
!tb[NFTA_RANGE_TO_DATA])
return -EINVAL;
err = nft_data_init(NULL, &priv->data_from, &desc_from,
tb[NFTA_RANGE_FROM_DATA]);
if (err < 0)
return err;
err = nft_data_init(NULL, &priv->data_to, &desc_to,
tb[NFTA_RANGE_TO_DATA]);
if (err < 0)
goto err1;
if (desc_from.len != desc_to.len) {
err = -EINVAL;
goto err2;
}
err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg,
desc_from.len);
if (err < 0)
goto err2;
err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
if (err < 0)
goto err2;
switch (op) {
case NFT_RANGE_EQ:
case NFT_RANGE_NEQ:
break;
default:
err = -EINVAL;
goto err2;
}
priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
nft_data_release(&priv->data_to, desc_to.type);
err1:
nft_data_release(&priv->data_from, desc_from.type);
return err;
}
static int nft_range_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_RANGE_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_RANGE_OP, htonl(priv->op)))
goto nla_put_failure;
if (nft_data_dump(skb, NFTA_RANGE_FROM_DATA, &priv->data_from,
NFT_DATA_VALUE, priv->len) < 0 ||
nft_data_dump(skb, NFTA_RANGE_TO_DATA, &priv->data_to,
NFT_DATA_VALUE, priv->len) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static const struct nft_expr_ops nft_range_ops = {
.type = &nft_range_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_range_expr)),
.eval = nft_range_eval,
.init = nft_range_init,
.dump = nft_range_dump,
.reduce = NFT_REDUCE_READONLY,
};
struct nft_expr_type nft_range_type __read_mostly = {
.name = "range",
.ops = &nft_range_ops,
.policy = nft_range_policy,
.maxattr = NFTA_RANGE_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_range.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This module is used to copy security markings from packets
* to connections, and restore security markings from connections
* back to packets. This would normally be performed in conjunction
* with the SECMARK target and state match.
*
* Based somewhat on CONNMARK:
* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
* by Henrik Nordstrom <[email protected]>
*
* (C) 2006,2008 Red Hat, Inc., James Morris <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CONNSECMARK.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark");
MODULE_ALIAS("ipt_CONNSECMARK");
MODULE_ALIAS("ip6t_CONNSECMARK");
/*
* If the packet has a security mark and the connection does not, copy
* the security mark from the packet to the connection.
*/
static void secmark_save(const struct sk_buff *skb)
{
if (skb->secmark) {
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
if (ct && !ct->secmark) {
ct->secmark = skb->secmark;
nf_conntrack_event_cache(IPCT_SECMARK, ct);
}
}
}
/*
* If packet has no security mark, and the connection does, restore the
* security mark from the connection to the packet.
*/
static void secmark_restore(struct sk_buff *skb)
{
if (!skb->secmark) {
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
if (ct && ct->secmark)
skb->secmark = ct->secmark;
}
}
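/*
 * Usage sketch (illustrative): pairing
 *   iptables -t security -A OUTPUT -j CONNSECMARK --save
 *   iptables -t security -A INPUT -m state --state ESTABLISHED,RELATED
 *            -j CONNSECMARK --restore
 * copies a packet's security mark onto its connection on egress and
 * restores it onto reply packets on ingress.
 */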
static unsigned int
connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connsecmark_target_info *info = par->targinfo;
switch (info->mode) {
case CONNSECMARK_SAVE:
secmark_save(skb);
break;
case CONNSECMARK_RESTORE:
secmark_restore(skb);
break;
default:
BUG();
}
return XT_CONTINUE;
}
static int connsecmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_connsecmark_target_info *info = par->targinfo;
int ret;
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
par->table);
return -EINVAL;
}
switch (info->mode) {
case CONNSECMARK_SAVE:
case CONNSECMARK_RESTORE:
break;
default:
pr_info_ratelimited("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_target connsecmark_tg_reg __read_mostly = {
.name = "CONNSECMARK",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = connsecmark_tg_check,
.destroy = connsecmark_tg_destroy,
.target = connsecmark_tg,
.targetsize = sizeof(struct xt_connsecmark_target_info),
.me = THIS_MODULE,
};
static int __init connsecmark_tg_init(void)
{
return xt_register_target(&connsecmark_tg_reg);
}
static void __exit connsecmark_tg_exit(void)
{
xt_unregister_target(&connsecmark_tg_reg);
}
module_init(connsecmark_tg_init);
module_exit(connsecmark_tg_exit);
| linux-master | net/netfilter/xt_CONNSECMARK.c |
// SPDX-License-Identifier: GPL-2.0-only
/* SIP extension for IP connection tracking.
*
* (C) 2005 by Christian Hentschel <[email protected]>
* based on RR's ip_conntrack_ftp.c and other modules.
* (C) 2007 United Security Providers
* (C) 2007, 2008 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_sip.h>
#define HELPER_NAME "sip"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hentschel <[email protected]>");
MODULE_DESCRIPTION("SIP connection tracking helper");
MODULE_ALIAS("ip_conntrack_sip");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
#define MAX_PORTS 8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "port numbers of SIP servers");
static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT;
module_param(sip_timeout, uint, 0600);
MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");
static int sip_direct_signalling __read_mostly = 1;
module_param(sip_direct_signalling, int, 0600);
MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
"only (default 1)");
static int sip_direct_media __read_mostly = 1;
module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "expect media streams between signalling "
"endpoints only (default 1)");
static int sip_external_media __read_mostly = 0;
module_param(sip_external_media, int, 0600);
MODULE_PARM_DESC(sip_external_media, "expect media streams between external "
"endpoints (default 0)");
const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
static int string_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
int len = 0;
while (dptr < limit && isalpha(*dptr)) {
dptr++;
len++;
}
return len;
}
static int digits_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
int len = 0;
while (dptr < limit && isdigit(*dptr)) {
dptr++;
len++;
}
return len;
}
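/* Accept roughly the RFC 3261 "word" character set as used in Call-ID
* values: alphanumerics plus most printable punctuation, excluding the
* separators that terminate the token. */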
static int iswordc(const char c)
{
if (isalnum(c) || c == '!' || c == '"' || c == '%' ||
(c >= '(' && c <= '+') || c == ':' || c == '<' || c == '>' ||
c == '?' || (c >= '[' && c <= ']') || c == '_' || c == '`' ||
c == '{' || c == '}' || c == '~' || (c >= '-' && c <= '/') ||
c == '\'')
return 1;
return 0;
}
static int word_len(const char *dptr, const char *limit)
{
int len = 0;
while (dptr < limit && iswordc(*dptr)) {
dptr++;
len++;
}
return len;
}
static int callid_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
int len, domain_len;
len = word_len(dptr, limit);
dptr += len;
if (!len || dptr == limit || *dptr != '@')
return len;
dptr++;
len++;
domain_len = word_len(dptr, limit);
if (!domain_len)
return 0;
return len + domain_len;
}
/* Get the combined length of the media type and port. */
static int media_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
int len = string_len(ct, dptr, limit, shift);
dptr += len;
if (dptr >= limit || *dptr != ' ')
return 0;
len++;
dptr++;
return len + digits_len(ct, dptr, limit, shift);
}
static int sip_parse_addr(const struct nf_conn *ct, const char *cp,
const char **endp, union nf_inet_addr *addr,
const char *limit, bool delim)
{
const char *end;
int ret;
if (!ct)
return 0;
memset(addr, 0, sizeof(*addr));
switch (nf_ct_l3num(ct)) {
case AF_INET:
ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
if (ret == 0)
return 0;
break;
case AF_INET6:
if (cp < limit && *cp == '[')
cp++;
else if (delim)
return 0;
ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
if (ret == 0)
return 0;
if (end < limit && *end == ']')
end++;
else if (delim)
return 0;
break;
default:
BUG();
}
if (endp)
*endp = end;
return 1;
}
/* Skip an IP address (and optional port number) and return its length. */
static int epaddr_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
union nf_inet_addr addr;
const char *aux = dptr;
if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) {
pr_debug("ip: %s parse failed.!\n", dptr);
return 0;
}
/* Port number */
if (*dptr == ':') {
dptr++;
dptr += digits_len(ct, dptr, limit, shift);
}
return dptr - aux;
}
/* Get the address length, skipping any user info. */
static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
const char *start = dptr;
int s = *shift;
/* Search for @, but stop at the end of the line.
* We are inside a sip: URI, so we don't need to worry about
* continuation lines. */
while (dptr < limit &&
*dptr != '@' && *dptr != '\r' && *dptr != '\n') {
(*shift)++;
dptr++;
}
if (dptr < limit && *dptr == '@') {
dptr++;
(*shift)++;
} else {
dptr = start;
*shift = s;
}
return epaddr_len(ct, dptr, limit, shift);
}
/* Parse a SIP request line of the form:
*
* Request-Line = Method SP Request-URI SP SIP-Version CRLF
*
* and return the offset and length of the address contained in the Request-URI.
*/
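/* For example (illustrative, documentation-range address), given
*
*   INVITE sip:[email protected]:5060 SIP/2.0
*
* the user info "bob@" is skipped, the match covers "198.51.100.2:5060",
* and the parsed address and port are returned through @addr and @port.
* Only numeric addresses are handled here. */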
int ct_sip_parse_request(const struct nf_conn *ct,
const char *dptr, unsigned int datalen,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port)
{
const char *start = dptr, *limit = dptr + datalen, *end;
unsigned int mlen;
unsigned int p;
int shift = 0;
/* Skip method and following whitespace */
mlen = string_len(ct, dptr, limit, NULL);
if (!mlen)
return 0;
dptr += mlen;
if (++dptr >= limit)
return 0;
/* Find SIP URI */
for (; dptr < limit - strlen("sip:"); dptr++) {
if (*dptr == '\r' || *dptr == '\n')
return -1;
if (strncasecmp(dptr, "sip:", strlen("sip:")) == 0) {
dptr += strlen("sip:");
break;
}
}
if (!skp_epaddr_len(ct, dptr, limit, &shift))
return 0;
dptr += shift;
if (!sip_parse_addr(ct, dptr, &end, addr, limit, true))
return -1;
if (end < limit && *end == ':') {
end++;
p = simple_strtoul(end, (char **)&end, 10);
if (p < 1024 || p > 65535)
return -1;
*port = htons(p);
} else
*port = htons(SIP_PORT);
if (end == dptr)
return 0;
*matchoff = dptr - start;
*matchlen = end - dptr;
return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_request);
/* SIP header parsing: SIP headers are located at the beginning of a line, but
* may span several lines, in which case the continuation lines begin with a
* whitespace character. RFC 2543 allows lines to be terminated with CR, LF or
* CRLF; RFC 3261 allows only CRLF. We support both.
*
* A header name is followed by optional whitespace, a colon, more optional
* whitespace and the value. Whitespace in this context means any amount of
* tabs, spaces and continuation lines, which are treated as a single whitespace
* character.
*
* Some headers may appear multiple times. A comma separated list of values is
* equivalent to multiple headers.
*/
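/* Compact header forms are equivalent to the full names, e.g. these two
* headers (illustrative) are treated identically:
*   Contact: <sip:[email protected]:5060>
*   m: <sip:[email protected]:5060>
*/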
static const struct sip_header ct_sip_hdrs[] = {
[SIP_HDR_CSEQ] = SIP_HDR("CSeq", NULL, NULL, digits_len),
[SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
[SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
[SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
[SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
[SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len),
[SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
[SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
[SIP_HDR_CALL_ID] = SIP_HDR("Call-Id", "i", NULL, callid_len),
};
static const char *sip_follow_continuation(const char *dptr, const char *limit)
{
/* Walk past newline */
if (++dptr >= limit)
return NULL;
/* Skip '\n' in CR LF */
if (*(dptr - 1) == '\r' && *dptr == '\n') {
if (++dptr >= limit)
return NULL;
}
/* Continuation line? */
if (*dptr != ' ' && *dptr != '\t')
return NULL;
/* skip leading whitespace */
for (; dptr < limit; dptr++) {
if (*dptr != ' ' && *dptr != '\t')
break;
}
return dptr;
}
static const char *sip_skip_whitespace(const char *dptr, const char *limit)
{
for (; dptr < limit; dptr++) {
if (*dptr == ' ' || *dptr == '\t')
continue;
if (*dptr != '\r' && *dptr != '\n')
break;
dptr = sip_follow_continuation(dptr, limit);
break;
}
return dptr;
}
/* Search within a SIP header value, dealing with continuation lines */
static const char *ct_sip_header_search(const char *dptr, const char *limit,
const char *needle, unsigned int len)
{
for (limit -= len; dptr < limit; dptr++) {
if (*dptr == '\r' || *dptr == '\n') {
dptr = sip_follow_continuation(dptr, limit);
if (dptr == NULL)
break;
continue;
}
if (strncasecmp(dptr, needle, len) == 0)
return dptr;
}
return NULL;
}
int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sip_header_types type,
unsigned int *matchoff, unsigned int *matchlen)
{
const struct sip_header *hdr = &ct_sip_hdrs[type];
const char *start = dptr, *limit = dptr + datalen;
int shift = 0;
for (dptr += dataoff; dptr < limit; dptr++) {
/* Find beginning of line */
if (*dptr != '\r' && *dptr != '\n')
continue;
if (++dptr >= limit)
break;
if (*(dptr - 1) == '\r' && *dptr == '\n') {
if (++dptr >= limit)
break;
}
/* Skip continuation lines */
if (*dptr == ' ' || *dptr == '\t')
continue;
/* Find header. Compact headers must be followed by a
* non-alphabetic character to avoid mismatches. */
if (limit - dptr >= hdr->len &&
strncasecmp(dptr, hdr->name, hdr->len) == 0)
dptr += hdr->len;
else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
strncasecmp(dptr, hdr->cname, hdr->clen) == 0 &&
!isalpha(*(dptr + hdr->clen)))
dptr += hdr->clen;
else
continue;
/* Find and skip colon */
dptr = sip_skip_whitespace(dptr, limit);
if (dptr == NULL)
break;
if (*dptr != ':' || ++dptr >= limit)
break;
/* Skip whitespace after colon */
dptr = sip_skip_whitespace(dptr, limit);
if (dptr == NULL)
break;
*matchoff = dptr - start;
if (hdr->search) {
dptr = ct_sip_header_search(dptr, limit, hdr->search,
hdr->slen);
if (!dptr)
return -1;
dptr += hdr->slen;
}
*matchlen = hdr->match_len(ct, dptr, limit, &shift);
if (!*matchlen)
return -1;
*matchoff = dptr - start + shift;
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(ct_sip_get_header);
/* Get next header field in a list of comma separated values */
static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sip_header_types type,
unsigned int *matchoff, unsigned int *matchlen)
{
const struct sip_header *hdr = &ct_sip_hdrs[type];
const char *start = dptr, *limit = dptr + datalen;
int shift = 0;
dptr += dataoff;
dptr = ct_sip_header_search(dptr, limit, ",", strlen(","));
if (!dptr)
return 0;
dptr = ct_sip_header_search(dptr, limit, hdr->search, hdr->slen);
if (!dptr)
return 0;
dptr += hdr->slen;
*matchoff = dptr - start;
*matchlen = hdr->match_len(ct, dptr, limit, &shift);
if (!*matchlen)
return -1;
*matchoff += shift;
return 1;
}
/* Walk through headers until a parsable one is found or no header of the
* given type is left. */
static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sip_header_types type, int *in_header,
unsigned int *matchoff, unsigned int *matchlen)
{
int ret;
if (in_header && *in_header) {
while (1) {
ret = ct_sip_next_header(ct, dptr, dataoff, datalen,
type, matchoff, matchlen);
if (ret > 0)
return ret;
if (ret == 0)
break;
dataoff = *matchoff;
}
*in_header = 0;
}
while (1) {
ret = ct_sip_get_header(ct, dptr, dataoff, datalen,
type, matchoff, matchlen);
if (ret > 0)
break;
if (ret == 0)
return ret;
dataoff = *matchoff;
}
if (in_header)
*in_header = 1;
return 1;
}
/* Locate a SIP header, parse the URI and return the offset and length of
* the address as well as the address and port themselves. A stream of
* headers can be parsed by handing in a non-NULL datalen and in_header
* pointer.
*/
int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
unsigned int *dataoff, unsigned int datalen,
enum sip_header_types type, int *in_header,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port)
{
const char *c, *limit = dptr + datalen;
unsigned int p;
int ret;
ret = ct_sip_walk_headers(ct, dptr, dataoff ? *dataoff : 0, datalen,
type, in_header, matchoff, matchlen);
WARN_ON(ret < 0);
if (ret == 0)
return ret;
if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true))
return -1;
if (*c == ':') {
c++;
p = simple_strtoul(c, (char **)&c, 10);
if (p < 1024 || p > 65535)
return -1;
*port = htons(p);
} else
*port = htons(SIP_PORT);
if (dataoff)
*dataoff = c - dptr;
return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen)
{
const char *limit = dptr + datalen;
const char *start;
const char *end;
limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
if (!limit)
limit = dptr + datalen;
start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
if (!start)
return 0;
start += strlen(name);
end = ct_sip_header_search(start, limit, ";", strlen(";"));
if (!end)
end = limit;
*matchoff = start - dptr;
*matchlen = end - start;
return 1;
}
/* Parse address from header parameter and return address, offset and length */
int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, bool delim)
{
const char *limit = dptr + datalen;
const char *start, *end;
limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
if (!limit)
limit = dptr + datalen;
start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
if (!start)
return 0;
start += strlen(name);
if (!sip_parse_addr(ct, start, &end, addr, limit, delim))
return 0;
*matchoff = start - dptr;
*matchlen = end - start;
return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_address_param);
/* Parse numerical header parameter and return value, offset and length */
int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
unsigned int *val)
{
const char *limit = dptr + datalen;
const char *start;
char *end;
limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
if (!limit)
limit = dptr + datalen;
start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
if (!start)
return 0;
start += strlen(name);
*val = simple_strtoul(start, &end, 0);
if (start == end)
return -1;
if (matchoff && matchlen) {
*matchoff = start - dptr;
*matchlen = end - start;
}
return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
u8 *proto)
{
unsigned int matchoff, matchlen;
if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
&matchoff, &matchlen)) {
if (!strncasecmp(dptr + matchoff, "TCP", strlen("TCP")))
*proto = IPPROTO_TCP;
else if (!strncasecmp(dptr + matchoff, "UDP", strlen("UDP")))
*proto = IPPROTO_UDP;
else
return 0;
if (*proto != nf_ct_protonum(ct))
return 0;
} else
*proto = nf_ct_protonum(ct);
return 1;
}
static int sdp_parse_addr(const struct nf_conn *ct, const char *cp,
const char **endp, union nf_inet_addr *addr,
const char *limit)
{
const char *end;
int ret;
memset(addr, 0, sizeof(*addr));
switch (nf_ct_l3num(ct)) {
case AF_INET:
ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
break;
case AF_INET6:
ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
break;
default:
BUG();
}
if (ret == 0)
return 0;
if (endp)
*endp = end;
return 1;
}
/* Skip an IP address and return its length. */
static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
union nf_inet_addr addr;
const char *aux = dptr;
if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) {
pr_debug("ip: %s parse failed.!\n", dptr);
return 0;
}
return dptr - aux;
}
/* SDP header parsing: an SDP session description contains an ordered set of
* headers, starting with a section containing general session parameters,
* optionally followed by multiple media descriptions.
*
* SDP headers always start at the beginning of a line. According to RFC 2327:
* "The sequence CRLF (0x0d0a) is used to end a record, although parsers should
* be tolerant and also accept records terminated with a single newline
* character". We handle both cases.
*/
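/* A minimal IPv4 session description as matched by the tables below
* (illustrative, addresses from the documentation range):
*   v=0
*   o=alice 2890844526 2890844526 IN IP4 198.51.100.1
*   c=IN IP4 198.51.100.1
*   m=audio 49170 RTP/AVP 0
*/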
static const struct sip_header ct_sdp_hdrs_v4[] = {
[SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
[SDP_HDR_OWNER] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
[SDP_HDR_CONNECTION] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
[SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
};
static const struct sip_header ct_sdp_hdrs_v6[] = {
[SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
[SDP_HDR_OWNER] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
[SDP_HDR_CONNECTION] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
[SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
};
/* Linear string search within SDP header values */
static const char *ct_sdp_header_search(const char *dptr, const char *limit,
const char *needle, unsigned int len)
{
for (limit -= len; dptr < limit; dptr++) {
if (*dptr == '\r' || *dptr == '\n')
break;
if (strncmp(dptr, needle, len) == 0)
return dptr;
}
return NULL;
}
/* Locate an SDP header (optionally a substring within the header value),
* optionally stopping at the first occurrence of the term header, parse
* it and return the offset and length of the data we're interested in.
*/
int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sdp_header_types type,
enum sdp_header_types term,
unsigned int *matchoff, unsigned int *matchlen)
{
const struct sip_header *hdrs, *hdr, *thdr;
const char *start = dptr, *limit = dptr + datalen;
int shift = 0;
hdrs = nf_ct_l3num(ct) == NFPROTO_IPV4 ? ct_sdp_hdrs_v4 : ct_sdp_hdrs_v6;
hdr = &hdrs[type];
thdr = &hdrs[term];
for (dptr += dataoff; dptr < limit; dptr++) {
/* Find beginning of line */
if (*dptr != '\r' && *dptr != '\n')
continue;
if (++dptr >= limit)
break;
if (*(dptr - 1) == '\r' && *dptr == '\n') {
if (++dptr >= limit)
break;
}
if (term != SDP_HDR_UNSPEC &&
limit - dptr >= thdr->len &&
strncasecmp(dptr, thdr->name, thdr->len) == 0)
break;
else if (limit - dptr >= hdr->len &&
strncasecmp(dptr, hdr->name, hdr->len) == 0)
dptr += hdr->len;
else
continue;
*matchoff = dptr - start;
if (hdr->search) {
dptr = ct_sdp_header_search(dptr, limit, hdr->search,
hdr->slen);
if (!dptr)
return -1;
dptr += hdr->slen;
}
*matchlen = hdr->match_len(ct, dptr, limit, &shift);
if (!*matchlen)
return -1;
*matchoff = dptr - start + shift;
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(ct_sip_get_sdp_header);
static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sdp_header_types type,
enum sdp_header_types term,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr)
{
int ret;
ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term,
matchoff, matchlen);
if (ret <= 0)
return ret;
if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr,
dptr + *matchoff + *matchlen))
return -1;
return 1;
}
static int refresh_signalling_expectation(struct nf_conn *ct,
union nf_inet_addr *addr,
u8 proto, __be16 port,
unsigned int expires)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
struct hlist_node *next;
int found = 0;
spin_lock_bh(&nf_conntrack_expect_lock);
hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
if (exp->class != SIP_EXPECT_SIGNALLING ||
!nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
exp->tuple.dst.protonum != proto ||
exp->tuple.dst.u.udp.port != port)
continue;
if (mod_timer_pending(&exp->timeout, jiffies + expires * HZ)) {
exp->flags &= ~NF_CT_EXPECT_INACTIVE;
found = 1;
break;
}
}
spin_unlock_bh(&nf_conntrack_expect_lock);
return found;
}
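/* media == true: remove all media (RTP/RTCP) expectations;
* media == false: remove the signalling expectation. */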
static void flush_expectations(struct nf_conn *ct, bool media)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
struct hlist_node *next;
spin_lock_bh(&nf_conntrack_expect_lock);
hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
continue;
if (!nf_ct_remove_expect(exp))
continue;
if (!media)
break;
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
union nf_inet_addr *daddr, __be16 port,
enum sip_expectation_classes class,
unsigned int mediaoff, unsigned int medialen)
{
struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct net *net = nf_ct_net(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
union nf_inet_addr *saddr;
struct nf_conntrack_tuple tuple;
int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
u_int16_t base_port;
__be16 rtp_port, rtcp_port;
const struct nf_nat_sip_hooks *hooks;
saddr = NULL;
if (sip_direct_media) {
if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3))
return NF_ACCEPT;
saddr = &ct->tuplehash[!dir].tuple.src.u3;
} else if (sip_external_media) {
struct net_device *dev = skb_dst(skb)->dev;
struct net *net = dev_net(dev);
struct flowi fl;
struct dst_entry *dst = NULL;
memset(&fl, 0, sizeof(fl));
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
fl.u.ip4.daddr = daddr->ip;
nf_ip_route(net, &dst, &fl, false);
break;
case NFPROTO_IPV6:
fl.u.ip6.daddr = daddr->in6;
nf_ip6_route(net, &dst, &fl, false);
break;
}
/* Don't predict any conntracks when media endpoint is reachable
* through the same interface as the signalling peer.
*/
if (dst) {
bool external_media = (dst->dev == dev);
dst_release(dst);
if (external_media)
return NF_ACCEPT;
}
}
/* We need to check whether the registration exists before attempting
* to register it since we can see the same media description multiple
* times on different connections in case multiple endpoints receive
* the same call.
*
* RTP optimization: if we find a matching media channel expectation
* and both the expectation and this connection are SNATed, we assume
* both sides can reach each other directly and use the final
* destination address from the expectation. We still need to keep
* the NATed expectations for media that might arrive from the
* outside, and additionally need to expect the direct RTP stream
* in case it passes through us even without NAT.
*/
memset(&tuple, 0, sizeof(tuple));
if (saddr)
tuple.src.u3 = *saddr;
tuple.src.l3num = nf_ct_l3num(ct);
tuple.dst.protonum = IPPROTO_UDP;
tuple.dst.u3 = *daddr;
tuple.dst.u.udp.port = port;
do {
exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
if (!exp || exp->master == ct ||
nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
exp->class != class)
break;
#if IS_ENABLED(CONFIG_NF_NAT)
if (!direct_rtp &&
(!nf_inet_addr_cmp(&exp->saved_addr, &exp->tuple.dst.u3) ||
exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
ct->status & IPS_NAT_MASK) {
*daddr = exp->saved_addr;
tuple.dst.u3 = exp->saved_addr;
tuple.dst.u.udp.port = exp->saved_proto.udp.port;
direct_rtp = 1;
} else
#endif
skip_expect = 1;
} while (!skip_expect);
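/* RTP conventionally uses an even port with RTCP on the next odd port
* (RFC 3550), so derive both from the port signalled in the SDP. */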
base_port = ntohs(tuple.dst.u.udp.port) & ~1;
rtp_port = htons(base_port);
rtcp_port = htons(base_port + 1);
if (direct_rtp) {
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks &&
!hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
mediaoff, medialen, ntohs(rtp_port)))
goto err1;
}
if (skip_expect)
return NF_ACCEPT;
rtp_exp = nf_ct_expect_alloc(ct);
if (rtp_exp == NULL)
goto err1;
nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr,
IPPROTO_UDP, NULL, &rtp_port);
rtcp_exp = nf_ct_expect_alloc(ct);
if (rtcp_exp == NULL)
goto err2;
nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
IPPROTO_UDP, NULL, &rtcp_port);
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp)
ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
datalen, rtp_exp, rtcp_exp,
mediaoff, medialen, daddr);
else {
/* -EALREADY handling works around endpoints that send
* SDP messages with an identical port but a different media
* type; in that case we pretend the expectation was set up.
* It also covers SDP messages sent with identical expect
* tuples but for different master conntracks.
*/
int errp = nf_ct_expect_related(rtp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (errp == 0 || errp == -EALREADY) {
int errcp = nf_ct_expect_related(rtcp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (errcp == 0 || errcp == -EALREADY)
ret = NF_ACCEPT;
else if (errp == 0)
nf_ct_unexpect_related(rtp_exp);
}
}
nf_ct_expect_put(rtcp_exp);
err2:
nf_ct_expect_put(rtp_exp);
err1:
return ret;
}
static const struct sdp_media_type sdp_media_types[] = {
SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
};
static const struct sdp_media_type *sdp_media_type(const char *dptr,
unsigned int matchoff,
unsigned int matchlen)
{
const struct sdp_media_type *t;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sdp_media_types); i++) {
t = &sdp_media_types[i];
if (matchlen < t->len ||
strncmp(dptr + matchoff, t->name, t->len))
continue;
return t;
}
return NULL;
}
static int process_sdp(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchoff, matchlen;
unsigned int mediaoff, medialen;
unsigned int sdpoff;
unsigned int caddr_len, maddr_len;
unsigned int i;
union nf_inet_addr caddr, maddr, rtp_addr;
const struct nf_nat_sip_hooks *hooks;
unsigned int port;
const struct sdp_media_type *t;
int ret = NF_ACCEPT;
hooks = rcu_dereference(nf_nat_sip_hooks);
/* Find beginning of session description */
if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
SDP_HDR_VERSION, SDP_HDR_UNSPEC,
&matchoff, &matchlen) <= 0)
return NF_ACCEPT;
sdpoff = matchoff;
/* The connection information is contained in the session description
* and/or once per media description. The first media description marks
* the end of the session description. */
caddr_len = 0;
if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
&matchoff, &matchlen, &caddr) > 0)
caddr_len = matchlen;
mediaoff = sdpoff;
for (i = 0; i < ARRAY_SIZE(sdp_media_types); ) {
if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen,
SDP_HDR_MEDIA, SDP_HDR_UNSPEC,
&mediaoff, &medialen) <= 0)
break;
/* Get media type and port number. A media port value of zero
* indicates an inactive stream. */
t = sdp_media_type(*dptr, mediaoff, medialen);
if (!t) {
mediaoff += medialen;
continue;
}
mediaoff += t->len;
medialen -= t->len;
port = simple_strtoul(*dptr + mediaoff, NULL, 10);
if (port == 0)
continue;
if (port < 1024 || port > 65535) {
nf_ct_helper_log(skb, ct, "wrong port %u", port);
return NF_DROP;
}
/* The media description overrides the session description. */
maddr_len = 0;
if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
&matchoff, &matchlen, &maddr) > 0) {
maddr_len = matchlen;
memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
} else if (caddr_len)
memcpy(&rtp_addr, &caddr, sizeof(rtp_addr));
else {
nf_ct_helper_log(skb, ct, "cannot parse SDP message");
return NF_DROP;
}
ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
dptr, datalen,
&rtp_addr, htons(port), t->class,
mediaoff, medialen);
if (ret != NF_ACCEPT) {
nf_ct_helper_log(skb, ct,
"cannot add expectation for voice");
return ret;
}
/* Update media connection address if present */
if (maddr_len && hooks && ct->status & IPS_NAT_MASK) {
ret = hooks->sdp_addr(skb, protoff, dataoff,
dptr, datalen, mediaoff,
SDP_HDR_CONNECTION,
SDP_HDR_MEDIA,
&rtp_addr);
if (ret != NF_ACCEPT) {
nf_ct_helper_log(skb, ct, "cannot mangle SDP");
return ret;
}
}
i++;
}
/* Update session connection and owner addresses */
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks && ct->status & IPS_NAT_MASK)
ret = hooks->sdp_session(skb, protoff, dataoff,
dptr, datalen, sdpoff,
&rtp_addr);
return ret;
}
static int process_invite_response(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
static int process_update_response(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
static int process_prack_response(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
static int process_invite_request(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
unsigned int ret;
flush_expectations(ct, true);
ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
if (ret == NF_ACCEPT)
ct_sip_info->invite_cseq = cseq;
return ret;
}
static int process_bye_request(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
flush_expectations(ct, true);
return NF_ACCEPT;
}
/* Parse a REGISTER request and create a permanent expectation for incoming
* signalling connections. The expectation is marked inactive and is activated
* when receiving a response indicating success from the registrar.
*/
static int process_register_request(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned int matchoff, matchlen;
struct nf_conntrack_expect *exp;
union nf_inet_addr *saddr, daddr;
const struct nf_nat_sip_hooks *hooks;
struct nf_conntrack_helper *helper;
__be16 port;
u8 proto;
unsigned int expires = 0;
int ret;
/* Expected connections cannot register again. */
if (ct->status & IPS_EXPECTED)
return NF_ACCEPT;
/* We must check the expiration time: a value of zero signals the
* registrar to release the binding. We'll remove our expectation
* when receiving the new bindings in the response, but we don't
* want to create new ones.
*
* The expiration time may be contained in the Expires: header, the
* Contact: header parameters or the URI parameters.
*/
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
&matchoff, &matchlen) > 0)
expires = simple_strtoul(*dptr + matchoff, NULL, 10);
ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
SIP_HDR_CONTACT, NULL,
&matchoff, &matchlen, &daddr, &port);
if (ret < 0) {
nf_ct_helper_log(skb, ct, "cannot parse contact");
return NF_DROP;
} else if (ret == 0)
return NF_ACCEPT;
/* We don't support third-party registrations */
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
return NF_ACCEPT;
if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
&proto) == 0)
return NF_ACCEPT;
if (ct_sip_parse_numerical_param(ct, *dptr,
matchoff + matchlen, *datalen,
"expires=", NULL, NULL, &expires) < 0) {
nf_ct_helper_log(skb, ct, "cannot parse expires");
return NF_DROP;
}
if (expires == 0) {
ret = NF_ACCEPT;
goto store_cseq;
}
exp = nf_ct_expect_alloc(ct);
if (!exp) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
return NF_DROP;
}
saddr = NULL;
if (sip_direct_signalling)
saddr = &ct->tuplehash[!dir].tuple.src.u3;
helper = rcu_dereference(nfct_help(ct)->helper);
if (!helper)
return NF_DROP;
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
saddr, &daddr, proto, NULL, &port);
exp->timeout.expires = sip_timeout * HZ;
exp->helper = helper;
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks && ct->status & IPS_NAT_MASK)
ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
exp, matchoff, matchlen);
else {
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
} else
ret = NF_ACCEPT;
}
nf_ct_expect_put(exp);
store_cseq:
if (ret == NF_ACCEPT)
ct_sip_info->register_cseq = cseq;
return ret;
}
static int process_register_response(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
union nf_inet_addr addr;
__be16 port;
u8 proto;
unsigned int matchoff, matchlen, coff = 0;
unsigned int expires = 0;
int in_contact = 0, ret;
/* According to RFC 3261, "UAs MUST NOT send a new registration until
* they have received a final response from the registrar for the
* previous one or the previous REGISTER request has timed out".
*
* However, some servers fail to detect retransmissions and send late
* responses, so we store the sequence number of the last valid
* request and compare it here.
*/
if (ct_sip_info->register_cseq != cseq)
return NF_ACCEPT;
if (code >= 100 && code <= 199)
return NF_ACCEPT;
if (code < 200 || code > 299)
goto flush;
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
&matchoff, &matchlen) > 0)
expires = simple_strtoul(*dptr + matchoff, NULL, 10);
while (1) {
unsigned int c_expires = expires;
ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
SIP_HDR_CONTACT, &in_contact,
&matchoff, &matchlen,
&addr, &port);
if (ret < 0) {
nf_ct_helper_log(skb, ct, "cannot parse contact");
return NF_DROP;
} else if (ret == 0)
break;
/* We don't support third-party registrations */
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
continue;
if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
*datalen, &proto) == 0)
continue;
ret = ct_sip_parse_numerical_param(ct, *dptr,
matchoff + matchlen,
*datalen, "expires=",
NULL, NULL, &c_expires);
if (ret < 0) {
nf_ct_helper_log(skb, ct, "cannot parse expires");
return NF_DROP;
}
if (c_expires == 0)
break;
if (refresh_signalling_expectation(ct, &addr, proto, port,
c_expires))
return NF_ACCEPT;
}
flush:
flush_expectations(ct, false);
return NF_ACCEPT;
}
static const struct sip_handler sip_handlers[] = {
SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
SIP_HANDLER("UPDATE", process_sdp, process_update_response),
SIP_HANDLER("ACK", process_sdp, NULL),
SIP_HANDLER("PRACK", process_sdp, process_prack_response),
SIP_HANDLER("BYE", process_bye_request, NULL),
SIP_HANDLER("REGISTER", process_register_request, process_register_response),
};
static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchoff, matchlen, matchend;
unsigned int code, cseq, i;
if (*datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10);
if (!code) {
nf_ct_helper_log(skb, ct, "cannot get code");
return NF_DROP;
}
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
&matchoff, &matchlen) <= 0) {
nf_ct_helper_log(skb, ct, "cannot parse cseq");
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
matchend = matchoff + matchlen + 1;
for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
const struct sip_handler *handler;
handler = &sip_handlers[i];
if (handler->response == NULL)
continue;
if (*datalen < matchend + handler->len ||
strncasecmp(*dptr + matchend, handler->method, handler->len))
continue;
return handler->response(skb, protoff, dataoff, dptr, datalen,
cseq, code);
}
return NF_ACCEPT;
}
static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned int matchoff, matchlen;
unsigned int cseq, i;
union nf_inet_addr addr;
__be16 port;
/* Many Cisco IP phones use a high source port for SIP requests, but
* listen for the response on port 5060. If we are the local
* router for one of these phones, save the port number from the
* Via: header so that nf_nat_sip can redirect the responses to
* the correct port.
*/
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
SIP_HDR_VIA_UDP, NULL, &matchoff,
&matchlen, &addr, &port) > 0 &&
port != ct->tuplehash[dir].tuple.src.u.udp.port &&
nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
ct_sip_info->forced_dport = port;
for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
const struct sip_handler *handler;
handler = &sip_handlers[i];
if (handler->request == NULL)
continue;
if (*datalen < handler->len + 2 ||
strncasecmp(*dptr, handler->method, handler->len))
continue;
if ((*dptr)[handler->len] != ' ' ||
!isalpha((*dptr)[handler->len+1]))
continue;
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
&matchoff, &matchlen) <= 0) {
nf_ct_helper_log(skb, ct, "cannot parse cseq");
return NF_DROP;
}
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
if (!cseq && *(*dptr + matchoff) != '0') {
nf_ct_helper_log(skb, ct, "cannot get cseq");
return NF_DROP;
}
return handler->request(skb, protoff, dataoff, dptr, datalen,
cseq);
}
return NF_ACCEPT;
}
static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
unsigned int protoff, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
const struct nf_nat_sip_hooks *hooks;
int ret;
if (strncasecmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
else
ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks && !hooks->msg(skb, protoff, dataoff,
dptr, datalen)) {
nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
ret = NF_DROP;
}
}
return ret;
}
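/* A TCP segment may carry several SIP messages back to back. Each message
* ends after its headers ("\r\n\r\n") plus Content-Length bytes of body,
* so the payload is walked message by message below. */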
static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct tcphdr *th, _tcph;
unsigned int dataoff, datalen;
unsigned int matchoff, matchlen, clen;
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
int ret = NF_ACCEPT;
bool term;
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* No data? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
dataoff = protoff + th->doff * 4;
if (dataoff >= skb->len)
return NF_ACCEPT;
nf_ct_refresh(ct, skb, sip_timeout * HZ);
if (unlikely(skb_linearize(skb)))
return NF_DROP;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
while (1) {
if (ct_sip_get_header(ct, dptr, 0, datalen,
SIP_HDR_CONTENT_LENGTH,
&matchoff, &matchlen) <= 0)
break;
clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
if (dptr + matchoff == end)
break;
term = false;
for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
if (end[0] == '\r' && end[1] == '\n' &&
end[2] == '\r' && end[3] == '\n') {
term = true;
break;
}
}
if (!term)
break;
end += strlen("\r\n\r\n") + clen;
msglen = origlen = end - dptr;
if (msglen > datalen)
return NF_ACCEPT;
ret = process_sip_msg(skb, ct, protoff, dataoff,
&dptr, &msglen);
/* process_sip_* functions report why this packet is dropped */
if (ret != NF_ACCEPT)
break;
diff = msglen - origlen;
tdiff += diff;
dataoff += msglen;
dptr += msglen;
datalen = datalen + diff - msglen;
}
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
const struct nf_nat_sip_hooks *hooks;
hooks = rcu_dereference(nf_nat_sip_hooks);
if (hooks)
hooks->seq_adjust(skb, protoff, tdiff);
}
return ret;
}
static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
unsigned int dataoff, datalen;
const char *dptr;
/* No data? */
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len)
return NF_ACCEPT;
nf_ct_refresh(ct, skb, sip_timeout * HZ);
if (unlikely(skb_linearize(skb)))
return NF_DROP;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
}
static struct nf_conntrack_helper sip[MAX_PORTS * 4] __read_mostly;
static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
[SIP_EXPECT_SIGNALLING] = {
.name = "signalling",
.max_expected = 1,
.timeout = 3 * 60,
},
[SIP_EXPECT_AUDIO] = {
.name = "audio",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
[SIP_EXPECT_VIDEO] = {
.name = "video",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
[SIP_EXPECT_IMAGE] = {
.name = "image",
.max_expected = IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
};
static void __exit nf_conntrack_sip_fini(void)
{
nf_conntrack_helpers_unregister(sip, ports_c * 4);
}
static int __init nf_conntrack_sip_init(void)
{
int i, ret;
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sip_master));
if (ports_c == 0)
ports[ports_c++] = SIP_PORT;
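/* Four helpers per port: IPv4 and IPv6, each over UDP and TCP. */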
for (i = 0; i < ports_c; i++) {
nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP,
HELPER_NAME, SIP_PORT, ports[i], i,
sip_exp_policy, SIP_EXPECT_MAX, sip_help_udp,
NULL, THIS_MODULE);
nf_ct_helper_init(&sip[4 * i + 1], AF_INET, IPPROTO_TCP,
HELPER_NAME, SIP_PORT, ports[i], i,
sip_exp_policy, SIP_EXPECT_MAX, sip_help_tcp,
NULL, THIS_MODULE);
nf_ct_helper_init(&sip[4 * i + 2], AF_INET6, IPPROTO_UDP,
HELPER_NAME, SIP_PORT, ports[i], i,
sip_exp_policy, SIP_EXPECT_MAX, sip_help_udp,
NULL, THIS_MODULE);
nf_ct_helper_init(&sip[4 * i + 3], AF_INET6, IPPROTO_TCP,
HELPER_NAME, SIP_PORT, ports[i], i,
sip_exp_policy, SIP_EXPECT_MAX, sip_help_tcp,
NULL, THIS_MODULE);
}
ret = nf_conntrack_helpers_register(sip, ports_c * 4);
if (ret < 0) {
pr_err("failed to register helpers\n");
return ret;
}
return 0;
}
module_init(nf_conntrack_sip_init);
module_exit(nf_conntrack_sip_fini);
| linux-master | net/netfilter/nf_conntrack_sip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NetBIOS name service broadcast connection tracking helper
*
* (c) 2005 Patrick McHardy <[email protected]>
*/
/*
* This helper tracks locally originating NetBIOS name service
* requests by issuing permanent expectations (valid until they
* time out) that match all reply connections from the
* destination network. The only NetBIOS-specific thing is
* the port number.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#define HELPER_NAME "netbios-ns"
#define NMBD_PORT 137
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_netbios_ns");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
static unsigned int timeout __read_mostly = 3;
module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
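/* Illustrative module load (example value only):
*   modprobe nf_conntrack_netbios_ns timeout=5
*/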
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
return nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout);
}
static struct nf_conntrack_helper helper __read_mostly = {
.name = HELPER_NAME,
.tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.help = netbios_ns_help,
.expect_policy = &exp_policy,
};
static int __init nf_conntrack_netbios_ns_init(void)
{
NF_CT_HELPER_BUILD_BUG_ON(0);
exp_policy.timeout = timeout;
return nf_conntrack_helper_register(&helper);
}
static void __exit nf_conntrack_netbios_ns_fini(void)
{
nf_conntrack_helper_unregister(&helper);
}
module_init(nf_conntrack_netbios_ns_init);
module_exit(nf_conntrack_netbios_ns_fini);
| linux-master | net/netfilter/nf_conntrack_netbios_ns.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>
struct nft_flow_offload {
struct nft_flowtable *flowtable;
};
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
if (dst_xfrm(dst))
return FLOW_OFFLOAD_XMIT_XFRM;
return FLOW_OFFLOAD_XMIT_NEIGH;
}
static void nft_default_forward_path(struct nf_flow_route *route,
struct dst_entry *dst_cache,
enum ip_conntrack_dir dir)
{
route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
route->tuple[dir].dst = dst_cache;
route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
}
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
return false;
return true;
}
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
const struct dst_entry *dst_cache,
const struct nf_conn *ct,
enum ip_conntrack_dir dir, u8 *ha,
struct net_device_path_stack *stack)
{
const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
struct net_device *dev = dst_cache->dev;
struct neighbour *n;
u8 nud_state;
if (!nft_is_valid_ether_device(dev))
goto out;
n = dst_neigh_lookup(dst_cache, daddr);
if (!n)
return -1;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
ether_addr_copy(ha, n->ha);
read_unlock_bh(&n->lock);
neigh_release(n);
if (!(nud_state & NUD_VALID))
return -1;
out:
return dev_fill_forward_path(dev, ha, stack);
}
struct nft_forward_info {
const struct net_device *indev;
const struct net_device *outdev;
const struct net_device *hw_outdev;
struct id {
__u16 id;
__be16 proto;
} encap[NF_FLOW_TABLE_ENCAP_MAX];
u8 num_encaps;
u8 ingress_vlans;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
enum flow_offload_xmit_type xmit_type;
};
static void nft_dev_path_info(const struct net_device_path_stack *stack,
struct nft_forward_info *info,
unsigned char *ha, struct nf_flowtable *flowtable)
{
const struct net_device_path *path;
int i;
memcpy(info->h_dest, ha, ETH_ALEN);
for (i = 0; i < stack->num_paths; i++) {
path = &stack->path[i];
switch (path->type) {
case DEV_PATH_ETHERNET:
case DEV_PATH_DSA:
case DEV_PATH_VLAN:
case DEV_PATH_PPPOE:
info->indev = path->dev;
if (is_zero_ether_addr(info->h_source))
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
if (path->type == DEV_PATH_ETHERNET)
break;
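/* The DSA port recorded above is the final input device; stop walking. */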
if (path->type == DEV_PATH_DSA) {
i = stack->num_paths;
break;
}
/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
info->indev = NULL;
break;
}
if (!info->outdev)
info->outdev = path->dev;
info->encap[info->num_encaps].id = path->encap.id;
info->encap[info->num_encaps].proto = path->encap.proto;
info->num_encaps++;
if (path->type == DEV_PATH_PPPOE)
memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
break;
case DEV_PATH_BRIDGE:
if (is_zero_ether_addr(info->h_source))
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
switch (path->bridge.vlan_mode) {
case DEV_PATH_BR_VLAN_UNTAG_HW:
info->ingress_vlans |= BIT(info->num_encaps - 1);
break;
case DEV_PATH_BR_VLAN_TAG:
info->encap[info->num_encaps].id = path->bridge.vlan_id;
info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
info->num_encaps++;
break;
case DEV_PATH_BR_VLAN_UNTAG:
info->num_encaps--;
break;
case DEV_PATH_BR_VLAN_KEEP:
break;
}
info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
break;
default:
info->indev = NULL;
break;
}
}
if (!info->outdev)
info->outdev = info->indev;
info->hw_outdev = info->indev;
if (nf_flowtable_hw_offload(flowtable) &&
nft_is_valid_ether_device(info->indev))
info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}
static bool nft_flowtable_find_dev(const struct net_device *dev,
struct nft_flowtable *ft)
{
struct nft_hook *hook;
bool found = false;
list_for_each_entry_rcu(hook, &ft->hook_list, list) {
if (hook->ops.dev != dev)
continue;
found = true;
break;
}
return found;
}
static void nft_dev_forward_path(struct nf_flow_route *route,
const struct nf_conn *ct,
enum ip_conntrack_dir dir,
struct nft_flowtable *ft)
{
const struct dst_entry *dst = route->tuple[dir].dst;
struct net_device_path_stack stack;
struct nft_forward_info info = {};
unsigned char ha[ETH_ALEN];
int i;
if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
nft_dev_path_info(&stack, &info, ha, &ft->data);
if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
return;
route->tuple[!dir].in.ifindex = info.indev->ifindex;
for (i = 0; i < info.num_encaps; i++) {
route->tuple[!dir].in.encap[i].id = info.encap[i].id;
route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
}
route->tuple[!dir].in.num_encaps = info.num_encaps;
route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
route->tuple[dir].out.ifindex = info.outdev->ifindex;
route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
route->tuple[dir].xmit_type = info.xmit_type;
}
}
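/* Look up the dst entry for the reply direction and record both
* directions in @route for the new flowtable entry. */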
static int nft_flow_route(const struct nft_pktinfo *pkt,
const struct nf_conn *ct,
struct nf_flow_route *route,
enum ip_conntrack_dir dir,
struct nft_flowtable *ft)
{
struct dst_entry *this_dst = skb_dst(pkt->skb);
struct dst_entry *other_dst = NULL;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
fl.u.ip4.flowi4_mark = pkt->skb->mark;
fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
break;
case NFPROTO_IPV6:
fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
fl.u.ip6.flowi6_mark = pkt->skb->mark;
fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
break;
}
if (!dst_hold_safe(this_dst))
return -ENOENT;
nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
if (!other_dst) {
dst_release(this_dst);
return -ENOENT;
}
nft_default_forward_path(route, this_dst, dir);
nft_default_forward_path(route, other_dst, !dir);
if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
nft_dev_forward_path(route, ct, dir, ft);
nft_dev_forward_path(route, ct, !dir, ft);
}
return 0;
}
static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
{
if (skb_sec_path(skb))
return true;
if (family == NFPROTO_IPV4) {
const struct ip_options *opt;
opt = &(IPCB(skb)->opt);
if (unlikely(opt->optlen))
return true;
}
return false;
}
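/* Illustrative nft ruleset that exercises this expression (approximate
* syntax, device names are examples only):
*   table inet ft {
*       flowtable f {
*           hook ingress priority 0; devices = { eth0, eth1 };
*       }
*       chain forward {
*           type filter hook forward priority 0; policy accept;
*           meta l4proto { tcp, udp } flow add @f
*       }
*   }
*/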
static void nft_flow_offload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
struct tcphdr _tcph, *tcph = NULL;
struct nf_flow_route route = {};
enum ip_conntrack_info ctinfo;
struct flow_offload *flow;
enum ip_conntrack_dir dir;
struct nf_conn *ct;
int ret;
if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
goto out;
ct = nf_ct_get(pkt->skb, &ctinfo);
if (!ct)
goto out;
switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
case IPPROTO_TCP:
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
sizeof(_tcph), &_tcph);
if (unlikely(!tcph || tcph->fin || tcph->rst ||
!nf_conntrack_tcp_established(ct)))
goto out;
break;
case IPPROTO_UDP:
break;
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE: {
struct nf_conntrack_tuple *tuple;
if (ct->status & IPS_NAT_MASK)
goto out;
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
/* No support for GRE v1 */
if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
goto out;
break;
}
#endif
default:
goto out;
}
if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
goto out;
if (!nf_ct_is_confirmed(ct))
goto out;
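/* The OFFLOAD bit doubles as an ownership flag: only the path that sets
* it first installs the flowtable entry. */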
if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
goto out;
dir = CTINFO2DIR(ctinfo);
if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
goto err_flow_route;
flow = flow_offload_alloc(ct);
if (!flow)
goto err_flow_alloc;
flow_offload_route_init(flow, &route);
if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
ret = flow_offload_add(flowtable, flow);
if (ret < 0)
goto err_flow_add;
return;
err_flow_add:
flow_offload_free(flow);
err_flow_alloc:
dst_release(route.tuple[dir].dst);
dst_release(route.tuple[!dir].dst);
err_flow_route:
clear_bit(IPS_OFFLOAD_BIT, &ct->status);
out:
regs->verdict.code = NFT_BREAK;
}
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
unsigned int hook_mask = (1 << NF_INET_FORWARD);
return nft_chain_validate_hooks(ctx->chain, hook_mask);
}
static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
[NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_NAME_MAXLEN - 1 },
};
static int nft_flow_offload_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
struct nft_flowtable *flowtable;
if (!tb[NFTA_FLOW_TABLE_NAME])
return -EINVAL;
flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
genmask);
if (IS_ERR(flowtable))
return PTR_ERR(flowtable);
if (!nft_use_inc(&flowtable->use))
return -EMFILE;
priv->flowtable = flowtable;
return nf_ct_netns_get(ctx->net, ctx->family);
}
static void nft_flow_offload_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
nf_tables_deactivate_flowtable(ctx, priv->flowtable, phase);
}
static void nft_flow_offload_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
nft_use_inc_restore(&priv->flowtable->use);
}
static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_flow_offload_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_flow_offload_type;
static const struct nft_expr_ops nft_flow_offload_ops = {
.type = &nft_flow_offload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
.eval = nft_flow_offload_eval,
.init = nft_flow_offload_init,
.activate = nft_flow_offload_activate,
.deactivate = nft_flow_offload_deactivate,
.destroy = nft_flow_offload_destroy,
.validate = nft_flow_offload_validate,
.dump = nft_flow_offload_dump,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_flow_offload_type __read_mostly = {
.name = "flow_offload",
.ops = &nft_flow_offload_ops,
.policy = nft_flow_offload_policy,
.maxattr = NFTA_FLOW_MAX,
.owner = THIS_MODULE,
};
static int flow_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nf_flow_table_cleanup(dev);
return NOTIFY_DONE;
}
static struct notifier_block flow_offload_netdev_notifier = {
.notifier_call = flow_offload_netdev_event,
};
static int __init nft_flow_offload_module_init(void)
{
int err;
err = register_netdevice_notifier(&flow_offload_netdev_notifier);
if (err)
goto err;
err = nft_register_expr(&nft_flow_offload_type);
if (err < 0)
goto register_expr;
return 0;
register_expr:
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
err:
return err;
}
static void __exit nft_flow_offload_module_exit(void)
{
nft_unregister_expr(&nft_flow_offload_type);
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
}
module_init(nft_flow_offload_module_init);
module_exit(nft_flow_offload_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
MODULE_DESCRIPTION("nftables hardware flow offload module");
| linux-master | net/netfilter/nft_flow_offload.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2015 Red Hat GmbH
* Author: Florian Westphal <[email protected]>
*/
#include <linux/module.h>
#include <linux/static_key.h>
#include <linux/hash.h>
#include <linux/siphash.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#define NFT_TRACETYPE_LL_HSIZE 20
#define NFT_TRACETYPE_NETWORK_HSIZE 40
#define NFT_TRACETYPE_TRANSPORT_HSIZE 20
DEFINE_STATIC_KEY_FALSE(nft_trace_enabled);
EXPORT_SYMBOL_GPL(nft_trace_enabled);
static int trace_fill_header(struct sk_buff *nlskb, u16 type,
const struct sk_buff *skb,
int off, unsigned int len)
{
struct nlattr *nla;
if (len == 0)
return 0;
nla = nla_reserve(nlskb, type, len);
if (!nla || skb_copy_bits(skb, off, nla_data(nla), len))
return -1;
return 0;
}
static int nf_trace_fill_ll_header(struct sk_buff *nlskb,
const struct sk_buff *skb)
{
struct vlan_ethhdr veth;
int off;
BUILD_BUG_ON(sizeof(veth) > NFT_TRACETYPE_LL_HSIZE);
off = skb_mac_header(skb) - skb->data;
if (off != -ETH_HLEN)
return -1;
if (skb_copy_bits(skb, off, &veth, ETH_HLEN))
return -1;
veth.h_vlan_proto = skb->vlan_proto;
veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
veth.h_vlan_encapsulated_proto = skb->protocol;
return nla_put(nlskb, NFTA_TRACE_LL_HEADER, sizeof(veth), &veth);
}
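/* Illustrative sketch (stand-alone C, hypothetical names, not the kernel
 * API): nf_trace_fill_ll_header() above re-inserts a hardware-stripped
 * VLAN tag. On the wire, a tagged frame carries dst/src MAC, the tag
 * (TPID + TCI), then the original EtherType, so rebuilding the header is
 * a copy plus a two-field splice:
 */
#include <stdint.h>
#include <string.h>
struct tagged_eth_sketch {
	uint8_t  dst[6], src[6];	/* MAC addresses, copied verbatim */
	uint16_t tpid;			/* tag protocol ID, e.g. 0x8100 */
	uint16_t tci;			/* priority/DEI/VLAN ID */
	uint16_t encap_proto;		/* original EtherType */
};
static void rebuild_tagged_header(const uint8_t eth[14], uint16_t tpid_be,
				  uint16_t tci_be,
				  struct tagged_eth_sketch *out)
{
	memcpy(out->dst, eth, 6);
	memcpy(out->src, eth + 6, 6);
	out->tpid = tpid_be;			/* tag replaces EtherType... */
	out->tci = tci_be;
	memcpy(&out->encap_proto, eth + 12, 2);	/* ...which moves down */
}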
static int nf_trace_fill_dev_info(struct sk_buff *nlskb,
const struct net_device *indev,
const struct net_device *outdev)
{
if (indev) {
if (nla_put_be32(nlskb, NFTA_TRACE_IIF,
htonl(indev->ifindex)))
return -1;
if (nla_put_be16(nlskb, NFTA_TRACE_IIFTYPE,
htons(indev->type)))
return -1;
}
if (outdev) {
if (nla_put_be32(nlskb, NFTA_TRACE_OIF,
htonl(outdev->ifindex)))
return -1;
if (nla_put_be16(nlskb, NFTA_TRACE_OIFTYPE,
htons(outdev->type)))
return -1;
}
return 0;
}
static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
const struct nft_pktinfo *pkt)
{
const struct sk_buff *skb = pkt->skb;
int off = skb_network_offset(skb);
unsigned int len, nh_end;
nh_end = pkt->flags & NFT_PKTINFO_L4PROTO ? nft_thoff(pkt) : skb->len;
len = min_t(unsigned int, nh_end - skb_network_offset(skb),
NFT_TRACETYPE_NETWORK_HSIZE);
if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
return -1;
if (pkt->flags & NFT_PKTINFO_L4PROTO) {
len = min_t(unsigned int, skb->len - nft_thoff(pkt),
NFT_TRACETYPE_TRANSPORT_HSIZE);
if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
nft_thoff(pkt), len))
return -1;
}
if (!skb_mac_header_was_set(skb))
return 0;
if (skb_vlan_tag_get(skb))
return nf_trace_fill_ll_header(nlskb, skb);
off = skb_mac_header(skb) - skb->data;
len = min_t(unsigned int, -off, NFT_TRACETYPE_LL_HSIZE);
return trace_fill_header(nlskb, NFTA_TRACE_LL_HEADER,
skb, off, len);
}
static int nf_trace_fill_rule_info(struct sk_buff *nlskb,
const struct nft_verdict *verdict,
const struct nft_rule_dp *rule,
const struct nft_traceinfo *info)
{
if (!rule || rule->is_last)
return 0;
/* a continue verdict with ->type == RETURN means that this is
* an implicit return (end of chain reached).
*
* Since no rule matched, the ->rule pointer is invalid.
*/
if (info->type == NFT_TRACETYPE_RETURN &&
verdict->code == NFT_CONTINUE)
return 0;
return nla_put_be64(nlskb, NFTA_TRACE_RULE_HANDLE,
cpu_to_be64(rule->handle),
NFTA_TRACE_PAD);
}
static bool nft_trace_have_verdict_chain(const struct nft_verdict *verdict,
struct nft_traceinfo *info)
{
switch (info->type) {
case NFT_TRACETYPE_RETURN:
case NFT_TRACETYPE_RULE:
break;
default:
return false;
}
switch (verdict->code) {
case NFT_JUMP:
case NFT_GOTO:
break;
default:
return false;
}
return true;
}
static const struct nft_chain *nft_trace_get_chain(const struct nft_rule_dp *rule,
const struct nft_traceinfo *info)
{
const struct nft_rule_dp_last *last;
if (!rule)
return &info->basechain->chain;
while (!rule->is_last)
rule = nft_rule_next(rule);
last = (const struct nft_rule_dp_last *)rule;
if (WARN_ON_ONCE(!last->chain))
return &info->basechain->chain;
return last->chain;
}
void nft_trace_notify(const struct nft_pktinfo *pkt,
const struct nft_verdict *verdict,
const struct nft_rule_dp *rule,
struct nft_traceinfo *info)
{
const struct nft_chain *chain;
struct nlmsghdr *nlh;
struct sk_buff *skb;
unsigned int size;
u32 mark = 0;
u16 event;
if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE))
return;
chain = nft_trace_get_chain(rule, info);
size = nlmsg_total_size(sizeof(struct nfgenmsg)) +
nla_total_size(strlen(chain->table->name)) +
nla_total_size(strlen(chain->name)) +
nla_total_size_64bit(sizeof(__be64)) + /* rule handle */
nla_total_size(sizeof(__be32)) + /* trace type */
nla_total_size(0) + /* VERDICT, nested */
nla_total_size(sizeof(u32)) + /* verdict code */
nla_total_size(sizeof(u32)) + /* id */
nla_total_size(NFT_TRACETYPE_LL_HSIZE) +
nla_total_size(NFT_TRACETYPE_NETWORK_HSIZE) +
nla_total_size(NFT_TRACETYPE_TRANSPORT_HSIZE) +
nla_total_size(sizeof(u32)) + /* iif */
nla_total_size(sizeof(__be16)) + /* iiftype */
nla_total_size(sizeof(u32)) + /* oif */
nla_total_size(sizeof(__be16)) + /* oiftype */
nla_total_size(sizeof(u32)) + /* mark */
nla_total_size(sizeof(u32)) + /* nfproto */
nla_total_size(sizeof(u32)); /* policy */
if (nft_trace_have_verdict_chain(verdict, info))
size += nla_total_size(strlen(verdict->chain->name)); /* jump target */
skb = nlmsg_new(size, GFP_ATOMIC);
if (!skb)
return;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_TRACE);
nlh = nfnl_msg_put(skb, 0, 0, event, 0, info->basechain->type->family,
NFNETLINK_V0, 0);
if (!nlh)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt))))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_TRACE_TYPE, htonl(info->type)))
goto nla_put_failure;
if (nla_put_u32(skb, NFTA_TRACE_ID, info->skbid))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_TRACE_CHAIN, chain->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_TRACE_TABLE, chain->table->name))
goto nla_put_failure;
if (nf_trace_fill_rule_info(skb, verdict, rule, info))
goto nla_put_failure;
switch (info->type) {
case NFT_TRACETYPE_UNSPEC:
case __NFT_TRACETYPE_MAX:
break;
case NFT_TRACETYPE_RETURN:
case NFT_TRACETYPE_RULE:
if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, verdict))
goto nla_put_failure;
/* pkt->skb undefined iff NF_STOLEN, disable dump */
if (verdict->code == NF_STOLEN)
info->packet_dumped = true;
else
mark = pkt->skb->mark;
break;
case NFT_TRACETYPE_POLICY:
mark = pkt->skb->mark;
if (nla_put_be32(skb, NFTA_TRACE_POLICY,
htonl(info->basechain->policy)))
goto nla_put_failure;
break;
}
if (mark && nla_put_be32(skb, NFTA_TRACE_MARK, htonl(mark)))
goto nla_put_failure;
if (!info->packet_dumped) {
if (nf_trace_fill_dev_info(skb, nft_in(pkt), nft_out(pkt)))
goto nla_put_failure;
if (nf_trace_fill_pkt_info(skb, pkt))
goto nla_put_failure;
info->packet_dumped = true;
}
nlmsg_end(skb, nlh);
nfnetlink_send(skb, nft_net(pkt), 0, NFNLGRP_NFTRACE, 0, GFP_ATOMIC);
return;
nla_put_failure:
WARN_ON_ONCE(1);
kfree_skb(skb);
}
void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
const struct nft_chain *chain)
{
static siphash_key_t trace_key __read_mostly;
struct sk_buff *skb = pkt->skb;
info->basechain = nft_base_chain(chain);
info->trace = true;
info->nf_trace = pkt->skb->nf_trace;
info->packet_dumped = false;
net_get_random_once(&trace_key, sizeof(trace_key));
info->skbid = (u32)siphash_3u32(hash32_ptr(skb),
skb_get_hash(skb),
skb->skb_iif,
&trace_key);
}
| linux-master | net/netfilter/nf_tables_trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/* PIPAPO: PIle PAcket POlicies: set for arbitrary concatenations of ranges
*
* Copyright (c) 2019-2020 Red Hat GmbH
*
* Author: Stefano Brivio <[email protected]>
*/
/**
* DOC: Theory of Operation
*
*
* Problem
* -------
*
* Match packet bytes against entries composed of ranged or non-ranged packet
* field specifiers, mapping them to arbitrary references. For example:
*
* ::
*
* --- fields --->
* | [net],[port],[net]... => [reference]
* entries [net],[port],[net]... => [reference]
* | [net],[port],[net]... => [reference]
* V ...
*
* where [net] fields can be IP ranges or netmasks, and [port] fields are port
* ranges. Arbitrary packet fields can be matched.
*
*
* Algorithm Overview
* ------------------
*
* This algorithm is loosely inspired by [Ligatti 2010], and fundamentally
* relies on the consideration that every contiguous range in a space of b bits
* can be converted into b * 2 netmasks, from Theorem 3 in [Rottenstreich 2010],
* as also illustrated in Section 9 of [Kogan 2014].
*
* Classification against a number of entries, that require matching given bits
* of a packet field, is performed by grouping those bits in sets of arbitrary
* size, and classifying packet bits one group at a time.
*
* Example:
* to match the source port (16 bits) of a packet, we can divide those 16 bits
* in 4 groups of 4 bits each. Given the entry:
* 0000 0001 0101 1001
* and a packet with source port:
* 0000 0001 1010 1001
* first and second groups match, but the third doesn't. We conclude that the
* packet doesn't match the given entry.
*
* Translate the set to a sequence of lookup tables, one per field. Each table
* has two dimensions: bit groups to be matched for a single packet field, and
* all the possible values of said groups (buckets). Input entries are
* represented as one or more rules, depending on the number of composing
* netmasks for the given field specifier, and a group match is indicated as a
* set bit, with number corresponding to the rule index, in all the buckets
* whose value matches the entry for a given group.
*
* Rules are mapped between fields through an array of x, n pairs, with each
* item mapping a matched rule to one or more rules. The position of the pair in
* the array indicates the matched rule to be mapped to the next field, x
* indicates the first rule index in the next field, and n the amount of
* next-field rules the current rule maps to.
*
* The mapping array for the last field maps to the desired references.
*
* To match, we perform table lookups using the values of grouped packet bits,
* and use a sequence of bitwise operations to progressively evaluate rule
* matching.
*
 * A stand-alone reference implementation, also including notes about possible
* future optimisations, is available at:
* https://pipapo.lameexcu.se/
*
* Insertion
* ---------
*
* - For each packet field:
*
* - divide the b packet bits we want to classify into groups of size t,
* obtaining ceil(b / t) groups
*
* Example: match on destination IP address, with t = 4: 32 bits, 8 groups
* of 4 bits each
*
* - allocate a lookup table with one column ("bucket") for each possible
* value of a group, and with one row for each group
*
* Example: 8 groups, 2^4 buckets:
*
* ::
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0
* 1
* 2
* 3
* 4
* 5
* 6
* 7
*
* - map the bits we want to classify for the current field, for a given
* entry, to a single rule for non-ranged and netmask set items, and to one
* or multiple rules for ranges. Ranges are expanded to composing netmasks
* by pipapo_expand().
*
* Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048
* - rule #0: 10.0.0.5
* - rule #1: 192.168.1.0/24
* - rule #2: 192.168.2.0/31
*
* - insert references to the rules in the lookup table, selecting buckets
* according to bit values of a rule in the given group. This is done by
* pipapo_insert().
*
* Example: given:
* - rule #0: 10.0.0.5 mapping to buckets
* < 0 10 0 0 0 0 0 5 >
* - rule #1: 192.168.1.0/24 mapping to buckets
* < 12 0 10 8 0 1 < 0..15 > < 0..15 > >
* - rule #2: 192.168.2.0/31 mapping to buckets
* < 12 0 10 8 0 2 0 < 0..1 > >
*
* these bits are set in the lookup table:
*
* ::
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0 1,2
* 1 1,2 0
* 2 0 1,2
* 3 0 1,2
* 4 0,1,2
* 5 0 1 2
* 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
* 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
*
* - if this is not the last field in the set, fill a mapping array that maps
* rules from the lookup table to rules belonging to the same entry in
* the next lookup table, done by pipapo_map().
*
* Note that as rules map to contiguous ranges of rules, given how netmask
* expansion and insertion is performed, &union nft_pipapo_map_bucket stores
* this information as pairs of first rule index, rule count.
*
* Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048,
* given lookup table #0 for field 0 (see example above):
*
* ::
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0 1,2
* 1 1,2 0
* 2 0 1,2
* 3 0 1,2
* 4 0,1,2
* 5 0 1 2
* 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
* 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
*
* and lookup table #1 for field 1 with:
* - rule #0: 1024 mapping to buckets
* < 0 0 4 0 >
* - rule #1: 2048 mapping to buckets
* < 0 0 5 0 >
*
* ::
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0,1
* 1 0,1
* 2 0 1
* 3 0,1
*
* we need to map rules for 10.0.0.5 in lookup table #0 (rule #0) to 1024
* in lookup table #1 (rule #0) and rules for 192.168.1.0-192.168.2.1
 * (rules #1, #2) to 2048 in lookup table #1 (rule #1):
*
* ::
*
* rule indices in current field: 0 1 2
* map to rules in next field: 0 1 1
*
* - if this is the last field in the set, fill a mapping array that maps
* rules from the last lookup table to element pointers, also done by
* pipapo_map().
*
* Note that, in this implementation, we have two elements (start, end) for
* each entry. The pointer to the end element is stored in this array, and
* the pointer to the start element is linked from it.
*
* Example: entry 10.0.0.5:1024 has a corresponding &struct nft_pipapo_elem
* pointer, 0x66, and element for 192.168.1.0-192.168.2.1:2048 is at 0x42.
* From the rules of lookup table #1 as mapped above:
*
* ::
*
* rule indices in last field: 0 1
* map to elements: 0x66 0x42
*
*
* Matching
* --------
*
* We use a result bitmap, with the size of a single lookup table bucket, to
* represent the matching state that applies at every algorithm step. This is
* done by pipapo_lookup().
*
* - For each packet field:
*
* - start with an all-ones result bitmap (res_map in pipapo_lookup())
*
* - perform a lookup into the table corresponding to the current field,
* for each group, and at every group, AND the current result bitmap with
* the value from the lookup table bucket
*
* ::
*
* Example: 192.168.1.5 < 12 0 10 8 0 1 0 5 >, with lookup table from
* insertion examples.
* Lookup table buckets are at least 3 bits wide, we'll assume 8 bits for
* convenience in this example. Initial result bitmap is 0xff, the steps
* below show the value of the result bitmap after each group is processed:
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0 1,2
* result bitmap is now: 0xff & 0x6 [bucket 12] = 0x6
*
* 1 1,2 0
* result bitmap is now: 0x6 & 0x6 [bucket 0] = 0x6
*
* 2 0 1,2
* result bitmap is now: 0x6 & 0x6 [bucket 10] = 0x6
*
* 3 0 1,2
* result bitmap is now: 0x6 & 0x6 [bucket 8] = 0x6
*
* 4 0,1,2
* result bitmap is now: 0x6 & 0x7 [bucket 0] = 0x6
*
* 5 0 1 2
* result bitmap is now: 0x6 & 0x2 [bucket 1] = 0x2
*
* 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
* result bitmap is now: 0x2 & 0x7 [bucket 0] = 0x2
*
* 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
* final result bitmap for this field is: 0x2 & 0x3 [bucket 5] = 0x2
*
* - at the next field, start with a new, all-zeroes result bitmap. For each
* bit set in the previous result bitmap, fill the new result bitmap
* (fill_map in pipapo_lookup()) with the rule indices from the
* corresponding buckets of the mapping field for this field, done by
* pipapo_refill()
*
* Example: with mapping table from insertion examples, with the current
* result bitmap from the previous example, 0x02:
*
* ::
*
* rule indices in current field: 0 1 2
* map to rules in next field: 0 1 1
*
 * the new result bitmap will be 0x02: rule 1 was set in the previous
 * field, and it maps to rule 1 of this field, so that bit is set in turn.
*
* We can now extend this example to cover the second iteration of the step
* above (lookup and AND bitmap): assuming the port field is
* 2048 < 0 0 5 0 >, with starting result bitmap 0x2, and lookup table
* for "port" field from pre-computation example:
*
* ::
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0,1
* 1 0,1
* 2 0 1
* 3 0,1
*
* operations are: 0x2 & 0x3 [bucket 0] & 0x3 [bucket 0] & 0x2 [bucket 5]
* & 0x3 [bucket 0], resulting bitmap is 0x2.
*
* - if this is the last field in the set, look up the value from the mapping
* array corresponding to the final result bitmap
*
* Example: 0x2 resulting bitmap from 192.168.1.5:2048, mapping array for
* last field from insertion example:
*
* ::
*
* rule indices in last field: 0 1
* map to elements: 0x66 0x42
*
* the matching element is at 0x42.
*
*
* References
* ----------
*
* [Ligatti 2010]
* A Packet-classification Algorithm for Arbitrary Bitmask Rules, with
* Automatic Time-space Tradeoffs
* Jay Ligatti, Josh Kuhn, and Chris Gage.
* Proceedings of the IEEE International Conference on Computer
* Communication Networks (ICCCN), August 2010.
* https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
*
* [Rottenstreich 2010]
* Worst-Case TCAM Rule Expansion
* Ori Rottenstreich and Isaac Keslassy.
* 2010 Proceedings IEEE INFOCOM, San Diego, CA, 2010.
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.212.4592&rep=rep1&type=pdf
*
* [Kogan 2014]
* SAX-PAC (Scalable And eXpressive PAcket Classification)
* Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
* and Patrick Eugster.
* Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
* https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <uapi/linux/netfilter/nf_tables.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "nft_set_pipapo_avx2.h"
#include "nft_set_pipapo.h"
/* Current working bitmap index, toggled between field matches */
static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
/**
* pipapo_refill() - For each set bit, set bits from selected mapping table item
* @map: Bitmap to be scanned for set bits
* @len: Length of bitmap in longs
* @rules: Number of rules in field
* @dst: Destination bitmap
* @mt: Mapping table containing bit set specifiers
* @match_only: Find a single bit and return, don't fill
*
* Iteration over set bits with __builtin_ctzl(): Daniel Lemire, public domain.
*
* For each bit set in map, select the bucket from mapping table with index
* corresponding to the position of the bit set. Use start bit and amount of
* bits specified in bucket to fill region in dst.
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
int k, ret = -1;
for (k = 0; k < len; k++) {
bitset = map[k];
while (bitset) {
unsigned long t = bitset & -bitset;
int r = __builtin_ctzl(bitset);
int i = k * BITS_PER_LONG + r;
if (unlikely(i >= rules)) {
map[k] = 0;
return -1;
}
if (match_only) {
bitmap_clear(map, i, 1);
return i;
}
ret = 0;
bitmap_set(dst, mt[i].to, mt[i].n);
bitset ^= t;
}
map[k] = 0;
}
return ret;
}
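/* Illustrative sketch (stand-alone C): the set-bit iteration idiom used
 * by pipapo_refill() above, in isolation. Peel off the lowest set bit
 * with (bitset & -bitset) and get its index with __builtin_ctzl(), so
 * each set bit is visited exactly once; e.g. 0x29UL yields 0, 3, 5.
 */
#include <stdio.h>
static void visit_set_bits(unsigned long map)
{
	unsigned long bitset = map;
	while (bitset) {
		unsigned long t = bitset & -bitset;	/* lowest set bit */
		int i = __builtin_ctzl(bitset);		/* its index */
		printf("bit %d is set\n", i);
		bitset ^= t;				/* clear it, go on */
	}
}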
/**
* nft_pipapo_lookup() - Lookup function
* @net: Network namespace
* @set: nftables API set representation
* @key: nftables API element representation containing key data
* @ext: nftables API extension pointer, filled with matching reference
*
* For more details, see DOC: Theory of Operation.
*
* Return: true on match, false otherwise.
*/
bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
unsigned long *res_map, *fill_map;
u8 genmask = nft_genmask_cur(net);
const u8 *rp = (const u8 *)key;
struct nft_pipapo_match *m;
struct nft_pipapo_field *f;
bool map_index;
int i;
local_bh_disable();
map_index = raw_cpu_read(nft_pipapo_scratch_index);
m = rcu_dereference(priv->match);
if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
goto out;
res_map = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
int b;
/* For each bit group: select lookup table bucket depending on
* packet bytes value, then AND bucket value
*/
if (likely(f->bb == 8))
pipapo_and_field_buckets_8bit(f, res_map, rp);
else
pipapo_and_field_buckets_4bit(f, res_map, rp);
NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
/* Now populate the bitmap for the next field, unless this is
* the last field, in which case return the matched 'ext'
* pointer if any.
*
* Now res_map contains the matching bitmap, and fill_map is the
* bitmap for the next field.
*/
next_match:
b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
last);
if (b < 0) {
raw_cpu_write(nft_pipapo_scratch_index, map_index);
local_bh_enable();
return false;
}
if (last) {
*ext = &f->mt[b].e->ext;
if (unlikely(nft_set_elem_expired(*ext) ||
!nft_set_elem_active(*ext, genmask)))
goto next_match;
/* Last field: we're just returning the key without
* filling the initial bitmap for the next field, so the
* current inactive bitmap is clean and can be reused as
* *next* bitmap (not initial) for the next packet.
*/
raw_cpu_write(nft_pipapo_scratch_index, map_index);
local_bh_enable();
return true;
}
/* Swap bitmap indices: res_map is the initial bitmap for the
* next field, and fill_map is guaranteed to be all-zeroes at
* this point.
*/
map_index = !map_index;
swap(res_map, fill_map);
rp += NFT_PIPAPO_GROUPS_PADDING(f);
}
out:
local_bh_enable();
return false;
}
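/* Illustrative sketch (stand-alone C, toy sizes): the per-field core of
 * the lookup above, modelled with 4-bit groups and an 8-bit result
 * bitmap (enough for 8 rules). For each group, pick the bucket indexed
 * by the corresponding nibble of the key and AND it into the result;
 * surviving set bits are the indices of still-matching rules.
 */
#include <stdint.h>
#define TOY_GROUPS 8	/* e.g. one IPv4 address at 4 bits per group */
static uint8_t toy_match_field(const uint8_t lt[TOY_GROUPS][16],
			       const uint8_t key[TOY_GROUPS / 2])
{
	uint8_t res = 0xff;	/* start from an all-ones bitmap */
	int g;
	for (g = 0; g < TOY_GROUPS; g++) {
		/* high nibble for even groups, low nibble for odd ones */
		uint8_t v = (g & 1) ? key[g / 2] & 0xf : key[g / 2] >> 4;
		res &= lt[g][v];
	}
	return res;
}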
/**
* pipapo_get() - Get matching element reference given key data
* @net: Network namespace
* @set: nftables API set representation
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
*
* This is essentially the same as the lookup function, except that it matches
* key data against the uncommitted copy and doesn't use preallocated maps for
* bitmap results.
*
* Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
*/
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
const struct nft_set *set,
const u8 *data, u8 genmask)
{
struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
unsigned long *res_map, *fill_map = NULL;
struct nft_pipapo_field *f;
int i;
res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
if (!res_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
if (!fill_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
int b;
/* For each bit group: select lookup table bucket depending on
* packet bytes value, then AND bucket value
*/
if (f->bb == 8)
pipapo_and_field_buckets_8bit(f, res_map, data);
else if (f->bb == 4)
pipapo_and_field_buckets_4bit(f, res_map, data);
else
BUG();
data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
/* Now populate the bitmap for the next field, unless this is
* the last field, in which case return the matched 'ext'
* pointer if any.
*
* Now res_map contains the matching bitmap, and fill_map is the
* bitmap for the next field.
*/
next_match:
b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
last);
if (b < 0)
goto out;
if (last) {
if (nft_set_elem_expired(&f->mt[b].e->ext))
goto next_match;
if ((genmask &&
!nft_set_elem_active(&f->mt[b].e->ext, genmask)))
goto next_match;
ret = f->mt[b].e;
goto out;
}
data += NFT_PIPAPO_GROUPS_PADDING(f);
/* Swap bitmap indices: fill_map will be the initial bitmap for
* the next field (i.e. the new res_map), and res_map is
* guaranteed to be all-zeroes at this point, ready to be filled
* according to the next mapping table.
*/
swap(res_map, fill_map);
}
out:
kfree(fill_map);
kfree(res_map);
return ret;
}
/**
* nft_pipapo_get() - Get matching element reference given key data
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
* @flags: Unused
 *
 * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
 */
static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
return pipapo_get(net, set, (const u8 *)elem->key.val.data,
nft_genmask_cur(net));
}
/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
* @old_rules: Previous amount of rules in field
* @rules: New amount of rules
*
* Increase, decrease or maintain tables size depending on new amount of rules,
* and copy data over. In case the new size is smaller, throw away data for
* highest-numbered rules.
*
* Return: 0 on success, -ENOMEM on allocation failure.
*/
static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
{
long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
size_t new_bucket_size, copy;
int group, bucket;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
new_bucket_size = roundup(new_bucket_size,
NFT_PIPAPO_ALIGN / sizeof(*new_lt));
#endif
if (new_bucket_size == f->bsize)
goto mt;
if (new_bucket_size > f->bsize)
copy = f->bsize;
else
copy = new_bucket_size;
new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
new_bucket_size * sizeof(*new_lt) +
NFT_PIPAPO_ALIGN_HEADROOM,
GFP_KERNEL);
if (!new_lt)
return -ENOMEM;
new_p = NFT_PIPAPO_LT_ALIGN(new_lt);
old_p = NFT_PIPAPO_LT_ALIGN(old_lt);
for (group = 0; group < f->groups; group++) {
for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS(f->bb); bucket++) {
memcpy(new_p, old_p, copy * sizeof(*new_p));
new_p += copy;
old_p += copy;
if (new_bucket_size > f->bsize)
new_p += new_bucket_size - f->bsize;
else
old_p += f->bsize - new_bucket_size;
}
}
mt:
new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
if (!new_mt) {
kvfree(new_lt);
return -ENOMEM;
}
memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
if (rules > old_rules) {
memset(new_mt + old_rules, 0,
(rules - old_rules) * sizeof(*new_mt));
}
if (new_lt) {
f->bsize = new_bucket_size;
NFT_PIPAPO_LT_ASSIGN(f, new_lt);
kvfree(old_lt);
}
f->mt = new_mt;
kvfree(old_mt);
return 0;
}
/**
* pipapo_bucket_set() - Set rule bit in bucket given group and group value
* @f: Field containing lookup table
* @rule: Rule index
* @group: Group index
* @v: Value of bit group
*/
static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
int v)
{
unsigned long *pos;
pos = NFT_PIPAPO_LT_ALIGN(f->lt);
pos += f->bsize * NFT_PIPAPO_BUCKETS(f->bb) * group;
pos += f->bsize * v;
__set_bit(rule, pos);
}
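/* Worked example for pipapo_bucket_set() above: the lookup table is one
 * flat array of longs laid out as [group][bucket][bsize], so the bucket
 * for (group g, value v) starts at
 * lt + (g * NFT_PIPAPO_BUCKETS(bb) + v) * f->bsize, which is what the
 * two additions compute. With bb = 4, bsize = 2, g = 3 and v = 9, the
 * bucket starts at long index (3 * 16 + 9) * 2 = 114.
 */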
/**
* pipapo_lt_4b_to_8b() - Switch lookup table group width from 4 bits to 8 bits
* @old_groups: Number of current groups
* @bsize: Size of one bucket, in longs
* @old_lt: Pointer to the current lookup table
* @new_lt: Pointer to the new, pre-allocated lookup table
*
* Each bucket with index b in the new lookup table, belonging to group g, is
* filled with the bit intersection between:
* - bucket with index given by the upper 4 bits of b, from group g, and
* - bucket with index given by the lower 4 bits of b, from group g + 1
*
* That is, given buckets from the new lookup table N(x, y) and the old lookup
* table O(x, y), with x bucket index, and y group index:
*
* N(b, g) := O(b / 16, g) & O(b % 16, g + 1)
*
* This ensures equivalence of the matching results on lookup. Two examples in
* pictures:
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ... 254 255
* 0 ^
* 1 | ^
* ... ( & ) |
* / \ |
* / \ .-( & )-.
* / bucket \ | |
* group 0 / 1 2 3 \ 4 5 6 7 8 9 10 11 12 13 |14 15 |
* 0 / \ | |
* 1 \ | |
* 2 | --'
* 3 '-
* ...
*/
static void pipapo_lt_4b_to_8b(int old_groups, int bsize,
unsigned long *old_lt, unsigned long *new_lt)
{
int g, b, i;
for (g = 0; g < old_groups / 2; g++) {
int src_g0 = g * 2, src_g1 = g * 2 + 1;
for (b = 0; b < NFT_PIPAPO_BUCKETS(8); b++) {
int src_b0 = b / NFT_PIPAPO_BUCKETS(4);
int src_b1 = b % NFT_PIPAPO_BUCKETS(4);
int src_i0 = src_g0 * NFT_PIPAPO_BUCKETS(4) + src_b0;
int src_i1 = src_g1 * NFT_PIPAPO_BUCKETS(4) + src_b1;
for (i = 0; i < bsize; i++) {
*new_lt = old_lt[src_i0 * bsize + i] &
old_lt[src_i1 * bsize + i];
new_lt++;
}
}
}
}
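/* Illustrative sketch (stand-alone C, one-long buckets): the relation
 * implemented above, N(b, g) := O(b / 16, g) & O(b % 16, g + 1), for a
 * single pair of source groups. A byte value b matches rule r in the
 * widened table iff both of its nibbles matched r in the original one,
 * so match results are preserved.
 */
static void toy_4b_to_8b(const unsigned long old_lt[2][16],
			 unsigned long new_lt[256])
{
	int b;
	for (b = 0; b < 256; b++)
		new_lt[b] = old_lt[0][b >> 4] & old_lt[1][b & 0xf];
}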
/**
* pipapo_lt_8b_to_4b() - Switch lookup table group width from 8 bits to 4 bits
* @old_groups: Number of current groups
* @bsize: Size of one bucket, in longs
* @old_lt: Pointer to the current lookup table
* @new_lt: Pointer to the new, pre-allocated lookup table
*
* Each bucket with index b in the new lookup table, belonging to group g, is
* filled with the bit union of:
* - all the buckets with index such that the upper four bits of the lower byte
* equal b, from group g, with g odd
* - all the buckets with index such that the lower four bits equal b, from
* group g, with g even
*
* That is, given buckets from the new lookup table N(x, y) and the old lookup
* table O(x, y), with x bucket index, and y group index:
*
* - with g odd: N(b, g) := U(O(x, g) for each x : x = (b & 0xf0) >> 4)
* - with g even: N(b, g) := U(O(x, g) for each x : x = b & 0x0f)
*
* where U() denotes the arbitrary union operation (binary OR of n terms). This
* ensures equivalence of the matching results on lookup.
*/
static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
unsigned long *old_lt, unsigned long *new_lt)
{
int g, b, bsrc, i;
memset(new_lt, 0, old_groups * 2 * NFT_PIPAPO_BUCKETS(4) * bsize *
sizeof(unsigned long));
for (g = 0; g < old_groups * 2; g += 2) {
int src_g = g / 2;
for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
bsrc++) {
if (((bsrc & 0xf0) >> 4) != b)
continue;
for (i = 0; i < bsize; i++)
new_lt[i] |= old_lt[bsrc * bsize + i];
}
new_lt += bsize;
}
for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
bsrc++) {
if ((bsrc & 0x0f) != b)
continue;
for (i = 0; i < bsize; i++)
new_lt[i] |= old_lt[bsrc * bsize + i];
}
new_lt += bsize;
}
}
}
/**
* pipapo_lt_bits_adjust() - Adjust group size for lookup table if needed
* @f: Field containing lookup table
*/
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
unsigned long *new_lt;
int groups, bb;
size_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
sizeof(*f->lt);
if (f->bb == NFT_PIPAPO_GROUP_BITS_SMALL_SET &&
lt_size > NFT_PIPAPO_LT_SIZE_HIGH) {
groups = f->groups * 2;
bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
sizeof(*f->lt);
} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
groups = f->groups / 2;
bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
sizeof(*f->lt);
/* Don't increase group width if the resulting lookup table size
* would exceed the upper size threshold for a "small" set.
*/
if (lt_size > NFT_PIPAPO_LT_SIZE_HIGH)
return;
} else {
return;
}
new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
if (!new_lt)
return;
NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
if (f->bb == 4 && bb == 8) {
pipapo_lt_4b_to_8b(f->groups, f->bsize,
NFT_PIPAPO_LT_ALIGN(f->lt),
NFT_PIPAPO_LT_ALIGN(new_lt));
} else if (f->bb == 8 && bb == 4) {
pipapo_lt_8b_to_4b(f->groups, f->bsize,
NFT_PIPAPO_LT_ALIGN(f->lt),
NFT_PIPAPO_LT_ALIGN(new_lt));
} else {
BUG();
}
f->groups = groups;
f->bb = bb;
kvfree(f->lt);
NFT_PIPAPO_LT_ASSIGN(f, new_lt);
}
/**
* pipapo_insert() - Insert new rule in field given input key and mask length
* @f: Field containing lookup table
* @k: Input key for classification, without nftables padding
* @mask_bits: Length of mask; matches field length for non-ranged entry
*
* Insert a new rule reference in lookup buckets corresponding to k and
* mask_bits.
*
* Return: 1 on success (one rule inserted), negative error code on failure.
*/
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
int rule = f->rules, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules, f->rules + 1);
if (ret)
return ret;
f->rules++;
for (group = 0; group < f->groups; group++) {
int i, v;
u8 mask;
v = k[group / (BITS_PER_BYTE / f->bb)];
v &= GENMASK(BITS_PER_BYTE - bit_offset - 1, 0);
v >>= (BITS_PER_BYTE - bit_offset) - f->bb;
bit_offset += f->bb;
bit_offset %= BITS_PER_BYTE;
if (mask_bits >= (group + 1) * f->bb) {
/* Not masked */
pipapo_bucket_set(f, rule, group, v);
} else if (mask_bits <= group * f->bb) {
/* Completely masked */
for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++)
pipapo_bucket_set(f, rule, group, i);
} else {
/* The mask limit falls on this group */
mask = GENMASK(f->bb - 1, 0);
mask >>= mask_bits - group * f->bb;
for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++) {
if ((i & ~mask) == (v & ~mask))
pipapo_bucket_set(f, rule, group, i);
}
}
}
pipapo_lt_bits_adjust(f);
return 1;
}
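/* Worked example for the group extraction in pipapo_insert() above,
 * specialised to 4-bit groups (f->bb == 4): group g lives in byte g / 2,
 * in the high nibble when g is even (bit_offset 0) and the low nibble
 * when g is odd (bit_offset 4). The generic mask-and-shift reduces to:
 */
static unsigned int toy_nibble_of(const unsigned char *k, int group)
{
	return (group & 1) ? k[group / 2] & 0xf : k[group / 2] >> 4;
}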
/**
* pipapo_step_diff() - Check if setting @step bit in netmask would change it
* @base: Mask we are expanding
* @step: Step bit for given expansion step
* @len: Total length of mask space (set and unset bits), bytes
*
* Convenience function for mask expansion.
*
* Return: true if step bit changes mask (i.e. isn't set), false otherwise.
*/
static bool pipapo_step_diff(u8 *base, int step, int len)
{
/* Network order, byte-addressed */
#ifdef __BIG_ENDIAN__
return !(BIT(step % BITS_PER_BYTE) & base[step / BITS_PER_BYTE]);
#else
return !(BIT(step % BITS_PER_BYTE) &
base[len - 1 - step / BITS_PER_BYTE]);
#endif
}
/**
* pipapo_step_after_end() - Check if mask exceeds range end with given step
* @base: Mask we are expanding
* @end: End of range
* @step: Step bit for given expansion step, highest bit to be set
* @len: Total length of mask space (set and unset bits), bytes
*
* Convenience function for mask expansion.
*
* Return: true if mask exceeds range setting step bits, false otherwise.
*/
static bool pipapo_step_after_end(const u8 *base, const u8 *end, int step,
int len)
{
u8 tmp[NFT_PIPAPO_MAX_BYTES];
int i;
memcpy(tmp, base, len);
/* Network order, byte-addressed */
for (i = 0; i <= step; i++)
#ifdef __BIG_ENDIAN__
tmp[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
#else
tmp[len - 1 - i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
#endif
return memcmp(tmp, end, len) > 0;
}
/**
* pipapo_base_sum() - Sum step bit to given len-sized netmask base with carry
* @base: Netmask base
* @step: Step bit to sum
* @len: Netmask length, bytes
*/
static void pipapo_base_sum(u8 *base, int step, int len)
{
bool carry = false;
int i;
/* Network order, byte-addressed */
#ifdef __BIG_ENDIAN__
for (i = step / BITS_PER_BYTE; i < len; i++) {
#else
for (i = len - 1 - step / BITS_PER_BYTE; i >= 0; i--) {
#endif
if (carry)
base[i]++;
else
base[i] += 1 << (step % BITS_PER_BYTE);
if (base[i])
break;
carry = true;
}
}
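/* Illustrative sketch (stand-alone C, fixed 4-byte mask): the function
 * above is big-number addition on a network-order byte array: add
 * 2^step, then ripple the carry towards more significant bytes. E.g.
 * adding bit 0 to < 192, 168, 1, 255 > yields < 192, 168, 2, 0 >.
 */
static void toy_base_sum(unsigned char base[4], int step)
{
	unsigned int add = 1u << (step % 8);
	int i;
	for (i = 3 - step / 8; i >= 0; i--) {
		unsigned int sum = base[i] + add;
		base[i] = (unsigned char)sum;
		if (sum <= 0xff)	/* no carry out: done */
			break;
		add = 1;		/* carry into the next byte */
	}
}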
/**
* pipapo_expand() - Expand to composing netmasks, insert into lookup table
* @f: Field containing lookup table
* @start: Start of range
* @end: End of range
* @len: Length of value in bits
*
* Expand range to composing netmasks and insert corresponding rule references
* in lookup buckets.
*
* Return: number of inserted rules on success, negative error code on failure.
*/
static int pipapo_expand(struct nft_pipapo_field *f,
const u8 *start, const u8 *end, int len)
{
int step, masks = 0, bytes = DIV_ROUND_UP(len, BITS_PER_BYTE);
u8 base[NFT_PIPAPO_MAX_BYTES];
memcpy(base, start, bytes);
while (memcmp(base, end, bytes) <= 0) {
int err;
step = 0;
while (pipapo_step_diff(base, step, bytes)) {
if (pipapo_step_after_end(base, end, step, bytes))
break;
step++;
if (step >= len) {
if (!masks) {
err = pipapo_insert(f, base, 0);
if (err < 0)
return err;
masks = 1;
}
goto out;
}
}
err = pipapo_insert(f, base, len - step);
if (err < 0)
return err;
masks++;
pipapo_base_sum(base, step, bytes);
}
out:
return masks;
}
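/* Illustrative sketch (stand-alone C, single-byte field): the expansion
 * loop above in miniature. Starting from 'base', widen the candidate
 * block while bit 'step' of base is clear and the block still fits
 * inside the range, emit one netmask of (8 - step) bits, then jump past
 * the block. E.g. the range 10..13 expands to 10/7 and 12/7 (two blocks
 * of two values each).
 */
#include <stdio.h>
static int toy_expand(unsigned int start, unsigned int end)
{
	unsigned int base = start;
	int masks = 0;
	while (base <= end) {
		int step = 0;
		while (!(base & (1u << step)) &&
		       (base | ((2u << step) - 1)) <= end)
			step++;
		printf("%u/%d\n", base, 8 - step);	/* one netmask */
		masks++;
		base += 1u << step;			/* next block */
	}
	return masks;
}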
/**
* pipapo_map() - Insert rules in mapping tables, mapping them between fields
* @m: Matching data, including mapping table
* @map: Table of rule maps: array of first rule and amount of rules
* in next field a given rule maps to, for each field
* @e: For last field, nft_set_ext pointer matching rules map to
*/
static void pipapo_map(struct nft_pipapo_match *m,
union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
struct nft_pipapo_elem *e)
{
struct nft_pipapo_field *f;
int i, j;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) {
for (j = 0; j < map[i].n; j++) {
f->mt[map[i].to + j].to = map[i + 1].to;
f->mt[map[i].to + j].n = map[i + 1].n;
}
}
/* Last field: map to ext instead of mapping to next field */
for (j = 0; j < map[i].n; j++)
f->mt[map[i].to + j].e = e;
}
/**
* pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
* @clone: Copy of matching data with pending insertions and deletions
* @bsize_max: Maximum bucket size, scratch maps cover two buckets
*
* Return: 0 on success, -ENOMEM on failure.
*/
static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
unsigned long bsize_max)
{
int i;
for_each_possible_cpu(i) {
unsigned long *scratch;
#ifdef NFT_PIPAPO_ALIGN
unsigned long *scratch_aligned;
#endif
scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
NFT_PIPAPO_ALIGN_HEADROOM,
GFP_KERNEL, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
* allocations: this means that some scratch maps have
* a bigger allocated size now (this is only called on
* insertion), but the extra space won't be used by any
* CPU as new elements are not inserted and m->bsize_max
* is not updated.
*/
return -ENOMEM;
}
kfree(*per_cpu_ptr(clone->scratch, i));
*per_cpu_ptr(clone->scratch, i) = scratch;
#ifdef NFT_PIPAPO_ALIGN
scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
*per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
#endif
}
return 0;
}
/**
* nft_pipapo_insert() - Validate and insert ranged elements
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
* @ext2: Filled with pointer to &struct nft_set_ext in inserted element
*
 * Return: 0 on success, negative error code on failure.
*/
static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext2)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
const u8 *start = (const u8 *)elem->key.val.data, *end;
struct nft_pipapo_elem *e = elem->priv, *dup;
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
u8 genmask = nft_genmask_next(net);
struct nft_pipapo_field *f;
const u8 *start_p, *end_p;
int i, bsize_max, err = 0;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
end = (const u8 *)nft_set_ext_key_end(ext)->data;
else
end = start;
dup = pipapo_get(net, set, start, genmask);
if (!IS_ERR(dup)) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
dup_key = nft_set_ext_key(&dup->ext);
if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
dup_end = nft_set_ext_key_end(&dup->ext);
else
dup_end = dup_key;
if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
!memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
*ext2 = &dup->ext;
return -EEXIST;
}
return -ENOTEMPTY;
}
if (PTR_ERR(dup) == -ENOENT) {
/* Look for partially overlapping entries */
dup = pipapo_get(net, set, end, nft_genmask_next(net));
}
if (PTR_ERR(dup) != -ENOENT) {
if (IS_ERR(dup))
return PTR_ERR(dup);
*ext2 = &dup->ext;
return -ENOTEMPTY;
}
/* Validate */
start_p = start;
end_p = end;
nft_pipapo_for_each_field(f, i, m) {
if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;
if (memcmp(start_p, end_p,
f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
return -EINVAL;
start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
/* Insert */
priv->dirty = true;
bsize_max = m->bsize_max;
nft_pipapo_for_each_field(f, i, m) {
int ret;
rulemap[i].to = f->rules;
ret = memcmp(start, end,
f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
if (!ret)
ret = pipapo_insert(f, start, f->groups * f->bb);
else
ret = pipapo_expand(f, start, end, f->groups * f->bb);
if (ret < 0)
return ret;
if (f->bsize > bsize_max)
bsize_max = f->bsize;
rulemap[i].n = ret;
start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
put_cpu_ptr(m->scratch);
err = pipapo_realloc_scratch(m, bsize_max);
if (err)
return err;
m->bsize_max = bsize_max;
} else {
put_cpu_ptr(m->scratch);
}
*ext2 = &e->ext;
pipapo_map(m, rulemap, e);
return 0;
}
/**
* pipapo_clone() - Clone matching data to create new working copy
* @old: Existing matching data
*
* Return: copy of matching data passed as 'old', error pointer on failure
*/
static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
{
struct nft_pipapo_field *dst, *src;
struct nft_pipapo_match *new;
int i;
new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
new->field_count = old->field_count;
new->bsize_max = old->bsize_max;
new->scratch = alloc_percpu(*new->scratch);
if (!new->scratch)
goto out_scratch;
#ifdef NFT_PIPAPO_ALIGN
new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
if (!new->scratch_aligned)
goto out_scratch;
#endif
for_each_possible_cpu(i)
*per_cpu_ptr(new->scratch, i) = NULL;
if (pipapo_realloc_scratch(new, old->bsize_max))
goto out_scratch_realloc;
rcu_head_init(&new->rcu);
src = old->f;
dst = new->f;
for (i = 0; i < old->field_count; i++) {
unsigned long *new_lt;
memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
src->bsize * sizeof(*dst->lt) +
NFT_PIPAPO_ALIGN_HEADROOM,
GFP_KERNEL);
if (!new_lt)
goto out_lt;
NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
src->groups * NFT_PIPAPO_BUCKETS(src->bb));
dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
if (!dst->mt)
goto out_mt;
memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
src++;
dst++;
}
return new;
out_mt:
kvfree(dst->lt);
out_lt:
for (dst--; i > 0; i--) {
kvfree(dst->mt);
kvfree(dst->lt);
dst--;
}
out_scratch_realloc:
for_each_possible_cpu(i)
kfree(*per_cpu_ptr(new->scratch, i));
#ifdef NFT_PIPAPO_ALIGN
free_percpu(new->scratch_aligned);
#endif
out_scratch:
free_percpu(new->scratch);
kfree(new);
return ERR_PTR(-ENOMEM);
}
/**
 * pipapo_rules_same_key() - Get number of rules originating from the same entry
* @f: Field containing mapping table
* @first: Index of first rule in set of rules mapping to same entry
*
* Using the fact that all rules in a field that originated from the same entry
* will map to the same set of rules in the next field, or to the same element
* reference, return the cardinality of the set of rules that originated from
* the same entry as the rule with index @first, @first rule included.
*
* In pictures:
* rules
* field #0 0 1 2 3 4
* map to: 0 1 2-4 2-4 5-9
* . . ....... . ...
* | | | | \ \
* | | | | \ \
* | | | | \ \
* ' ' ' ' ' \
* in field #1 0 1 2 3 4 5 ...
*
* if this is called for rule 2 on field #0, it will return 3, as also rules 2
* and 3 in field 0 map to the same set of rules (2, 3, 4) in the next field.
*
* For the last field in a set, we can rely on associated entries to map to the
* same element references.
*
* Return: Number of rules that originated from the same entry as @first.
*/
static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
{
struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
int r;
for (r = first; r < f->rules; r++) {
if (r != first && e != f->mt[r].e)
return r - first;
e = f->mt[r].e;
}
if (r != first)
return r - first;
return 0;
}
/**
* pipapo_unmap() - Remove rules from mapping tables, renumber remaining ones
* @mt: Mapping array
* @rules: Original amount of rules in mapping table
* @start: First rule index to be removed
* @n: Amount of rules to be removed
* @to_offset: First rule index, in next field, this group of rules maps to
* @is_last: If this is the last field, delete reference from mapping array
*
* This is used to unmap rules from the mapping table for a single field,
* maintaining consistency and compactness for the existing ones.
*
* In pictures: let's assume that we want to delete rules 2 and 3 from the
* following mapping array:
*
* rules
* 0 1 2 3 4
* map to: 4-10 4-10 11-15 11-15 16-18
*
* the result will be:
*
* rules
* 0 1 2
* map to: 4-10 4-10 11-13
*
* for fields before the last one. In case this is the mapping table for the
* last field in a set, and rules map to pointers to &struct nft_pipapo_elem:
*
* rules
* 0 1 2 3 4
* element pointers: 0x42 0x42 0x33 0x33 0x44
*
* the result will be:
*
* rules
* 0 1 2
* element pointers: 0x42 0x42 0x44
*/
static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
int start, int n, int to_offset, bool is_last)
{
int i;
memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
memset(mt + rules - n, 0, n * sizeof(*mt));
if (is_last)
return;
for (i = start; i < rules - n; i++)
mt[i].to -= to_offset;
}
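/* Illustrative sketch (stand-alone C, mapping reduced to plain ints):
 * the compaction above. Deleting n entries shifts the tail down by n
 * slots, and every surviving entry moved over the gap is renumbered by
 * to_offset, the count of next-field rules the deleted run mapped to.
 */
#include <string.h>
static void toy_unmap(int *to, int rules, int start, int n, int to_offset)
{
	int i;
	memmove(to + start, to + start + n, (rules - start - n) * sizeof(*to));
	for (i = start; i < rules - n; i++)
		to[i] -= to_offset;
}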
/**
* pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
* @m: Matching data
* @rulemap: Table of rule maps, arrays of first rule and amount of rules
* in next field a given entry maps to, for each field
*
* For each rule in lookup table buckets mapping to this set of rules, drop
* all bits set in lookup table mapping. In pictures, assuming we want to drop
* rules 0 and 1 from this lookup table:
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0 1,2
* 1 1,2 0
* 2 0 1,2
* 3 0 1,2
* 4 0,1,2
* 5 0 1 2
* 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
* 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
*
* rule 2 becomes rule 0, and the result will be:
*
* bucket
* group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
* 0 0
* 1 0
* 2 0
* 3 0
* 4 0
* 5 0
* 6 0
* 7 0 0
*
* once this is done, call unmap() to drop all the corresponding rule references
* from mapping tables.
*/
static void pipapo_drop(struct nft_pipapo_match *m,
union nft_pipapo_map_bucket rulemap[])
{
struct nft_pipapo_field *f;
int i;
nft_pipapo_for_each_field(f, i, m) {
int g;
for (g = 0; g < f->groups; g++) {
unsigned long *pos;
int b;
pos = NFT_PIPAPO_LT_ALIGN(f->lt) + g *
NFT_PIPAPO_BUCKETS(f->bb) * f->bsize;
for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
bitmap_cut(pos, pos, rulemap[i].to,
rulemap[i].n,
f->bsize * BITS_PER_LONG);
pos += f->bsize;
}
}
pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
rulemap[i + 1].n, i == m->field_count - 1);
if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
/* We can ignore this, a failure to shrink tables down
* doesn't make tables invalid.
*/
;
}
f->rules -= rulemap[i].n;
pipapo_lt_bits_adjust(f);
}
}
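/* Illustrative sketch (stand-alone C, single-long bitmap, assuming
 * first + n < BITS_PER_LONG): the effect of bitmap_cut() in the loop
 * above. Bits below the cut stay put, bits above it shift down by n, so
 * the surviving rules are renumbered; cutting rules 0-1 from 0b111
 * leaves 0b1, i.e. old rule 2 becomes rule 0, as in the picture above.
 */
static unsigned long toy_bitmap_cut(unsigned long map, int first, int n)
{
	unsigned long low = map & ((1UL << first) - 1);	/* below the cut */
	unsigned long high = map >> (first + n);	/* above the cut */
	return low | (high << first);
}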
static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
struct nft_pipapo_elem *e)
{
struct nft_set_elem elem = {
.priv = e,
};
nft_setelem_data_deactivate(net, set, &elem);
}
/**
* pipapo_gc() - Drop expired entries from set, destroy start and end elements
* @_set: nftables API set representation
* @m: Matching data
*/
static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
{
struct nft_set *set = (struct nft_set *) _set;
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
struct nft_trans_gc *gc;
gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
if (!gc)
return;
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
struct nft_pipapo_field *f;
int i, start, rules_fx;
start = first_rule;
rules_fx = rules_f0;
nft_pipapo_for_each_field(f, i, m) {
rulemap[i].to = start;
rulemap[i].n = rules_fx;
if (i < m->field_count - 1) {
rules_fx = f->mt[start].n;
start = f->mt[start].to;
}
}
/* Pick the last field, and its last index */
f--;
i--;
e = f->mt[rulemap[i].to].e;
/* Synchronous GC never fails, so there is no need to set the
 * NFT_SET_ELEM_DEAD_BIT flag.
*/
if (nft_set_elem_expired(&e->ext)) {
priv->dirty = true;
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (!gc)
return;
nft_pipapo_gc_deactivate(net, set, e);
pipapo_drop(m, rulemap);
nft_trans_gc_elem_add(gc, e);
/* And check again current first rule, which is now the
* first we haven't checked.
*/
} else {
first_rule += rules_f0;
}
}
gc = nft_trans_gc_catchall_sync(gc);
if (gc) {
nft_trans_gc_queue_sync_done(gc);
priv->last_gc = jiffies;
}
}
/**
* pipapo_free_fields() - Free per-field tables contained in matching data
* @m: Matching data
*/
static void pipapo_free_fields(struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
int i;
nft_pipapo_for_each_field(f, i, m) {
kvfree(f->lt);
kvfree(f->mt);
}
}
static void pipapo_free_match(struct nft_pipapo_match *m)
{
int i;
for_each_possible_cpu(i)
kfree(*per_cpu_ptr(m->scratch, i));
#ifdef NFT_PIPAPO_ALIGN
free_percpu(m->scratch_aligned);
#endif
free_percpu(m->scratch);
pipapo_free_fields(m);
kfree(m);
}
/**
 * pipapo_reclaim_match() - RCU callback to free old matching data
* @rcu: RCU head
*/
static void pipapo_reclaim_match(struct rcu_head *rcu)
{
struct nft_pipapo_match *m;
m = container_of(rcu, struct nft_pipapo_match, rcu);
pipapo_free_match(m);
}
/**
* nft_pipapo_commit() - Replace lookup data with current working copy
* @set: nftables API set representation
*
* While at it, check if we should perform garbage collection on the working
* copy before committing it for lookup, and don't replace the table if the
* working copy doesn't have pending changes.
*
* We also need to create a new working copy for subsequent insertions and
* deletions.
*/
static void nft_pipapo_commit(const struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *new_clone, *old;
if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
pipapo_gc(set, priv->clone);
if (!priv->dirty)
return;
new_clone = pipapo_clone(priv->clone);
if (IS_ERR(new_clone))
return;
priv->dirty = false;
old = rcu_access_pointer(priv->match);
rcu_assign_pointer(priv->match, priv->clone);
if (old)
call_rcu(&old->rcu, pipapo_reclaim_match);
priv->clone = new_clone;
}
static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
{
#ifdef CONFIG_PROVE_LOCKING
const struct net *net = read_pnet(&set->net);
return lockdep_is_held(&nft_pernet(net)->commit_mutex);
#else
return true;
#endif
}
static void nft_pipapo_abort(const struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *new_clone, *m;
if (!priv->dirty)
return;
m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
new_clone = pipapo_clone(m);
if (IS_ERR(new_clone))
return;
priv->dirty = false;
pipapo_free_match(priv->clone);
priv->clone = new_clone;
}
/**
* nft_pipapo_activate() - Mark element reference as active given key, commit
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
*
* On insertion, elements are added to a copy of the matching data currently
* in use for lookups, and not directly inserted into current lookup data. Both
* nft_pipapo_insert() and nft_pipapo_activate() are called once for each
 * element, hence we can't use either one as the real commit operation.
*/
static void nft_pipapo_activate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_pipapo_elem *e = elem->priv;
nft_set_elem_change_active(net, set, &e->ext);
}
/**
* pipapo_deactivate() - Check that element is in set, mark as inactive
* @net: Network namespace
* @set: nftables API set representation
* @data: Input key data
* @ext: nftables API extension pointer, used to check for end element
*
* This is a convenience function that can be called from both
* nft_pipapo_deactivate() and nft_pipapo_flush(), as they are in fact the same
* operation.
*
* Return: deactivated element if found, NULL otherwise.
*/
static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
const u8 *data, const struct nft_set_ext *ext)
{
struct nft_pipapo_elem *e;
e = pipapo_get(net, set, data, nft_genmask_next(net));
if (IS_ERR(e))
return NULL;
nft_set_elem_change_active(net, set, &e->ext);
return e;
}
/**
* nft_pipapo_deactivate() - Call pipapo_deactivate() to make element inactive
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
*
* Return: deactivated element if found, NULL otherwise.
*/
static void *nft_pipapo_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
return pipapo_deactivate(net, set, (const u8 *)elem->key.val.data, ext);
}
/**
* nft_pipapo_flush() - Call pipapo_deactivate() to make element inactive
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
*
* This is functionally the same as nft_pipapo_deactivate(), with a slightly
* different interface, and it's also called once for each element in a set
* being flushed, so we can't implement, strictly speaking, a flush operation,
* which would otherwise be as simple as allocating an empty copy of the
* matching data.
*
* Note that we could in theory do that, mark the set as flushed, and ignore
* subsequent calls, but we would leak all the elements after the first one,
 * because they wouldn't then be freed as a result of API calls.
*
* Return: true if element was found and deactivated.
*/
static bool nft_pipapo_flush(const struct net *net, const struct nft_set *set,
void *elem)
{
struct nft_pipapo_elem *e = elem;
return pipapo_deactivate(net, set, (const u8 *)nft_set_ext_key(&e->ext),
&e->ext);
}
/**
* pipapo_get_boundaries() - Get byte interval for associated rules
* @f: Field including lookup table
* @first_rule: First rule (lowest index)
* @rule_count: Number of associated rules
* @left: Byte expression for left boundary (start of range)
* @right: Byte expression for right boundary (end of range)
*
* Given the first rule and the number of rules that originated from the same
* entry, build the original range associated with the entry, and calculate the
* length of the originating netmask.
*
* In pictures:
*
*      bucket
* group 0   1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
*   0                                                   1,2
*   1   1,2
*   2                                           1,2
*   3                                   1,2
*   4   1,2
*   5       1   2
*   6   1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
*   7   1,2 1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1
*
* this is the lookup table corresponding to the IPv4 range
* 192.168.1.0-192.168.2.1, which was expanded to the two composing netmasks,
* rule #1: 192.168.1.0/24, and rule #2: 192.168.2.0/31.
*
* This function fills @left and @right with the byte values of the leftmost
* and rightmost bucket indices for the lowest and highest rule indices,
* respectively. If @first_rule is 1 and @rule_count is 2, we obtain, in
* nibbles:
* left: < 12, 0, 10, 8, 0, 1, 0, 0 >
* right: < 12, 0, 10, 8, 0, 2, 0, 1 >
* corresponding to bytes:
* left: < 192, 168, 1, 0 >
* right: < 192, 168, 2, 1 >
* with mask length irrelevant here, unused on return, as the range is already
* defined by its start and end points. The mask length is relevant for a single
* ranged entry instead: if @first_rule is 1 and @rule_count is 1, we ignore
* rule 2 above: @left becomes < 192, 168, 1, 0 >, @right becomes
* < 192, 168, 1, 255 >, and the mask length, calculated from the distances
* between leftmost and rightmost bucket indices for each group, would be 24.
*
* Return: mask length, in bits.
*/
static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
int rule_count, u8 *left, u8 *right)
{
int g, mask_len = 0, bit_offset = 0;
u8 *l = left, *r = right;
for (g = 0; g < f->groups; g++) {
int b, x0, x1;
x0 = -1;
x1 = -1;
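/* x0/x1: leftmost and rightmost bucket indices, within this group, where
* the first and the last rule of the entry are set
*/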
for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
unsigned long *pos;
pos = NFT_PIPAPO_LT_ALIGN(f->lt) +
(g * NFT_PIPAPO_BUCKETS(f->bb) + b) * f->bsize;
if (test_bit(first_rule, pos) && x0 == -1)
x0 = b;
if (test_bit(first_rule + rule_count - 1, pos))
x1 = b;
}
*l |= x0 << (BITS_PER_BYTE - f->bb - bit_offset);
*r |= x1 << (BITS_PER_BYTE - f->bb - bit_offset);
bit_offset += f->bb;
if (bit_offset >= BITS_PER_BYTE) {
bit_offset %= BITS_PER_BYTE;
l++;
r++;
}
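/* Netmask bits contributed by this group: a span of a single bucket fixes
* all four bits of a 4-bit group, a span of two buckets fixes three bits,
* four buckets fix two, eight fix one; a wider (wildcarded) span adds
* nothing.
*/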
if (x1 - x0 == 0)
mask_len += 4;
else if (x1 - x0 == 1)
mask_len += 3;
else if (x1 - x0 == 3)
mask_len += 2;
else if (x1 - x0 == 7)
mask_len += 1;
}
return mask_len;
}
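/* Worked example (arithmetic sketch for the picture above): with
* @first_rule 1 and @rule_count 1, i.e. rule #1 (192.168.1.0/24) alone,
* groups 0..5 each span a single bucket (x1 - x0 == 0, 4 bits apiece),
* while groups 6 and 7 span all 16 buckets and add nothing, so
* mask_len = 6 * 4 = 24, matching the /24 prefix length.
*/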
/**
* pipapo_match_field() - Match rules against byte ranges
* @f: Field including the lookup table
* @first_rule: First of associated rules originating from same entry
* @rule_count: Number of associated rules
* @start: Start of range to be matched
* @end: End of range to be matched
*
* Return: true on match, false otherwise.
*/
static bool pipapo_match_field(struct nft_pipapo_field *f,
int first_rule, int rule_count,
const u8 *start, const u8 *end)
{
u8 right[NFT_PIPAPO_MAX_BYTES] = { 0 };
u8 left[NFT_PIPAPO_MAX_BYTES] = { 0 };
pipapo_get_boundaries(f, first_rule, rule_count, left, right);
return !memcmp(start, left,
f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) &&
!memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
}
/**
* nft_pipapo_remove() - Remove element given key, commit
* @net: Network namespace
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
*
* Similarly to nft_pipapo_activate(), this is used as commit operation by the
* API, but it's called once per element in the pending transaction, so we can't
* implement this as a single commit operation. Closest we can get is to remove
* the matched element here, if any, and commit the updated matching data.
*/
static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
struct nft_pipapo_elem *e = elem->priv;
int rules_f0, first_rule = 0;
const u8 *data;
data = (const u8 *)nft_set_ext_key(&e->ext);
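/* Scan the first field for runs of rules that map to the same element,
* then follow the mappings through the remaining fields: if every field
* matches the element's start/end key, drop the run from the working copy.
*/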
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
const u8 *match_start, *match_end;
struct nft_pipapo_field *f;
int i, start, rules_fx;
match_start = data;
if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
else
match_end = data;
start = first_rule;
rules_fx = rules_f0;
nft_pipapo_for_each_field(f, i, m) {
if (!pipapo_match_field(f, start, rules_fx,
match_start, match_end))
break;
rulemap[i].to = start;
rulemap[i].n = rules_fx;
rules_fx = f->mt[start].n;
start = f->mt[start].to;
match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
if (i == m->field_count) {
priv->dirty = true;
pipapo_drop(m, rulemap);
return;
}
first_rule += rules_f0;
}
}
/**
* nft_pipapo_walk() - Walk over elements
* @ctx: nftables API context
* @set: nftables API set representation
* @iter: Iterator
*
* As elements are referenced in the mapping array for the last field, directly
* scan that array: there's no need to follow rule mappings from the first
* field.
*/
static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
struct nft_pipapo_match *m;
struct nft_pipapo_field *f;
int i, r;
rcu_read_lock();
if (iter->genmask == nft_genmask_cur(net))
m = rcu_dereference(priv->match);
else
m = priv->clone;
if (unlikely(!m))
goto out;
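/* Elements are referenced by the last field's mapping array: skip ahead
* to it instead of following rule mappings from the first field.
*/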
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
for (r = 0; r < f->rules; r++) {
struct nft_pipapo_elem *e;
struct nft_set_elem elem;
if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
continue;
if (iter->count < iter->skip)
goto cont;
e = f->mt[r].e;
elem.priv = e;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
goto out;
cont:
iter->count++;
}
out:
rcu_read_unlock();
}
/**
* nft_pipapo_privsize() - Return the size of private data for the set
* @nla: netlink attributes, ignored as size doesn't depend on them
* @desc: Set description, ignored as size doesn't depend on it
*
* Return: size of private data for this set implementation, in bytes
*/
static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
return sizeof(struct nft_pipapo);
}
/**
* nft_pipapo_estimate() - Set size, space and lookup complexity
* @desc: Set description, element count and field description used
* @features: Flags: NFT_SET_INTERVAL needs to be there
* @est: Storage for estimation data
*
* Return: true if set description is compatible, false otherwise
*/
static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
if (!(features & NFT_SET_INTERVAL) ||
desc->field_count < NFT_PIPAPO_MIN_FIELDS)
return false;
est->size = pipapo_estimate_size(desc);
if (!est->size)
return false;
est->lookup = NFT_SET_CLASS_O_LOG_N;
est->space = NFT_SET_CLASS_O_N;
return true;
}
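/* Example of a set description that passes these checks (a sketch using
* standard nft syntax; table and set names are illustrative):
*
*	nft add set inet filter s '{ type ipv4_addr . inet_service ; flags interval ; }'
*	nft add element inet filter s '{ 192.168.1.0/24 . 80-88 }'
*
* A concatenation of at least NFT_PIPAPO_MIN_FIELDS ranged fields with
* NFT_SET_INTERVAL set makes this backend eligible; the final choice among
* eligible set types is based on the estimates returned here.
*/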
/**
* nft_pipapo_init() - Initialise data for a set instance
* @set: nftables API set representation
* @desc: Set description
* @nla: netlink attributes
*
* Validate number and size of fields passed as NFTA_SET_DESC_CONCAT netlink
* attributes, initialise internal set parameters, current instance of matching
* data and a copy for subsequent insertions.
*
* Return: 0 on success, negative error code on failure.
*/
static int nft_pipapo_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const nla[])
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m;
struct nft_pipapo_field *f;
int err, i, field_count;
field_count = desc->field_count ? : 1;
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
m = kmalloc(struct_size(m, f, field_count), GFP_KERNEL);
if (!m)
return -ENOMEM;
m->field_count = field_count;
m->bsize_max = 0;
m->scratch = alloc_percpu(unsigned long *);
if (!m->scratch) {
err = -ENOMEM;
goto out_scratch;
}
for_each_possible_cpu(i)
*per_cpu_ptr(m->scratch, i) = NULL;
#ifdef NFT_PIPAPO_ALIGN
m->scratch_aligned = alloc_percpu(unsigned long *);
if (!m->scratch_aligned) {
err = -ENOMEM;
goto out_free;
}
for_each_possible_cpu(i)
*per_cpu_ptr(m->scratch_aligned, i) = NULL;
#endif
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
int len = desc->field_len[i] ? : set->klen;
f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
priv->width += round_up(len, sizeof(u32));
f->bsize = 0;
f->rules = 0;
NFT_PIPAPO_LT_ASSIGN(f, NULL);
f->mt = NULL;
}
/* Create an initial clone of matching data for next insertion */
priv->clone = pipapo_clone(m);
if (IS_ERR(priv->clone)) {
err = PTR_ERR(priv->clone);
goto out_free;
}
priv->dirty = false;
rcu_assign_pointer(priv->match, m);
return 0;
out_free:
#ifdef NFT_PIPAPO_ALIGN
free_percpu(m->scratch_aligned);
#endif
free_percpu(m->scratch);
out_scratch:
kfree(m);
return err;
}
/**
* nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
* @ctx: context
* @set: nftables API set representation
* @m: matching data pointing to key mapping array
*/
static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
const struct nft_set *set,
struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
int i, r;
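/* As in nft_pipapo_walk(), elements hang off the last field's mapping
* array; adjacent rules expanded from the same ranged entry point to the
* same element, which must be destroyed only once.
*/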
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
for (r = 0; r < f->rules; r++) {
struct nft_pipapo_elem *e;
if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
continue;
e = f->mt[r].e;
nf_tables_set_elem_destroy(ctx, set, e);
}
}
/**
* nft_pipapo_destroy() - Free private data for set and all committed elements
* @ctx: context
* @set: nftables API set representation
*/
static void nft_pipapo_destroy(const struct nft_ctx *ctx,
const struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m;
int cpu;
m = rcu_dereference_protected(priv->match, true);
if (m) {
rcu_barrier();
nft_set_pipapo_match_destroy(ctx, set, m);
#ifdef NFT_PIPAPO_ALIGN
free_percpu(m->scratch_aligned);
#endif
for_each_possible_cpu(cpu)
kfree(*per_cpu_ptr(m->scratch, cpu));
free_percpu(m->scratch);
pipapo_free_fields(m);
kfree(m);
priv->match = NULL;
}
if (priv->clone) {
m = priv->clone;
if (priv->dirty)
nft_set_pipapo_match_destroy(ctx, set, m);
#ifdef NFT_PIPAPO_ALIGN
free_percpu(priv->clone->scratch_aligned);
#endif
for_each_possible_cpu(cpu)
kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
free_percpu(priv->clone->scratch);
pipapo_free_fields(priv->clone);
kfree(priv->clone);
priv->clone = NULL;
}
}
/**
* nft_pipapo_gc_init() - Initialise garbage collection
* @set: nftables API set representation
*
* Instead of actually setting up a periodic work for garbage collection, as
* this operation requires a swap of matching data with the working copy, we'll
* do that opportunistically with other commit operations if the interval has
* elapsed, so we just need to set the current jiffies timestamp here.
*/
static void nft_pipapo_gc_init(const struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
priv->last_gc = jiffies;
}
const struct nft_set_type nft_set_pipapo_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT,
.ops = {
.lookup = nft_pipapo_lookup,
.insert = nft_pipapo_insert,
.activate = nft_pipapo_activate,
.deactivate = nft_pipapo_deactivate,
.flush = nft_pipapo_flush,
.remove = nft_pipapo_remove,
.walk = nft_pipapo_walk,
.get = nft_pipapo_get,
.privsize = nft_pipapo_privsize,
.estimate = nft_pipapo_estimate,
.init = nft_pipapo_init,
.destroy = nft_pipapo_destroy,
.gc_init = nft_pipapo_gc_init,
.commit = nft_pipapo_commit,
.abort = nft_pipapo_abort,
.elemsize = offsetof(struct nft_pipapo_elem, ext),
},
};
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
const struct nft_set_type nft_set_pipapo_avx2_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT,
.ops = {
.lookup = nft_pipapo_avx2_lookup,
.insert = nft_pipapo_insert,
.activate = nft_pipapo_activate,
.deactivate = nft_pipapo_deactivate,
.flush = nft_pipapo_flush,
.remove = nft_pipapo_remove,
.walk = nft_pipapo_walk,
.get = nft_pipapo_get,
.privsize = nft_pipapo_privsize,
.estimate = nft_pipapo_avx2_estimate,
.init = nft_pipapo_init,
.destroy = nft_pipapo_destroy,
.gc_init = nft_pipapo_gc_init,
.commit = nft_pipapo_commit,
.abort = nft_pipapo_abort,
.elemsize = offsetof(struct nft_pipapo_elem, ext),
},
};
#endif
| linux-master | net/netfilter/nft_set_pipapo.c |
// SPDX-License-Identifier: GPL-2.0-only
/* SIP extension for NAT alteration.
*
* (C) 2005 by Christian Hentschel <[email protected]>
* based on RR's ip_nat_ftp.c and other modules.
* (C) 2007 United Security Providers
* (C) 2007, 2008, 2011, 2012 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <linux/netfilter/nf_conntrack_sip.h>
#define NAT_HELPER_NAME "sip"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hentschel <[email protected]>");
MODULE_DESCRIPTION("SIP NAT helper");
MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME);
static struct nf_conntrack_nat_helper nat_helper_sip =
NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME);
static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int matchoff, unsigned int matchlen,
const char *buffer, unsigned int buflen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct tcphdr *th;
unsigned int baseoff;
if (nf_ct_protonum(ct) == IPPROTO_TCP) {
th = (struct tcphdr *)(skb->data + protoff);
baseoff = protoff + th->doff * 4;
matchoff += dataoff - baseoff;
if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
protoff, matchoff, matchlen,
buffer, buflen, false))
return 0;
} else {
baseoff = protoff + sizeof(struct udphdr);
matchoff += dataoff - baseoff;
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
protoff, matchoff, matchlen,
buffer, buflen))
return 0;
}
/* Reload data pointer and adjust datalen value */
*dptr = skb->data + dataoff;
*datalen += buflen - matchlen;
return 1;
}
static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer,
const union nf_inet_addr *addr, bool delim)
{
if (nf_ct_l3num(ct) == NFPROTO_IPV4)
return sprintf(buffer, "%pI4", &addr->ip);
else {
if (delim)
return sprintf(buffer, "[%pI6c]", &addr->ip6);
else
return sprintf(buffer, "%pI6c", &addr->ip6);
}
}
static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer,
const union nf_inet_addr *addr, u16 port)
{
if (nf_ct_l3num(ct) == NFPROTO_IPV4)
return sprintf(buffer, "%pI4:%u", &addr->ip, port);
else
return sprintf(buffer, "[%pI6c]:%u", &addr->ip6, port);
}
static int map_addr(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int matchoff, unsigned int matchlen,
union nf_inet_addr *addr, __be16 port)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
unsigned int buflen;
union nf_inet_addr newaddr;
__be16 newport;
if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) &&
ct->tuplehash[dir].tuple.src.u.udp.port == port) {
newaddr = ct->tuplehash[!dir].tuple.dst.u3;
newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
} else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
newaddr = ct->tuplehash[!dir].tuple.src.u3;
newport = ct_sip_info->forced_dport ? :
ct->tuplehash[!dir].tuple.src.u.udp.port;
} else
return 1;
if (nf_inet_addr_cmp(&newaddr, addr) && newport == port)
return 1;
buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport));
return mangle_packet(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, buffer, buflen);
}
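/* Worked example (a sketch; addresses are illustrative): with source NAT
* mapping 10.0.0.2:5060 to 1.2.3.4:5060, a SIP URI carrying 10.0.0.2:5060
* seen in the original direction matches the first branch above and is
* rewritten with the reply tuple's destination, 1.2.3.4:5060.
*/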
static int map_sip_addr(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
enum sip_header_types type)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchlen, matchoff;
union nf_inet_addr addr;
__be16 port;
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
&matchoff, &matchlen, &addr, &port) <= 0)
return 1;
return map_addr(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, &addr, port);
}
static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
unsigned int coff, matchoff, matchlen;
enum sip_header_types hdr;
union nf_inet_addr addr;
__be16 port;
int request, in_header;
/* Basic rules: requests and responses. */
if (strncasecmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
if (ct_sip_parse_request(ct, *dptr, *datalen,
&matchoff, &matchlen,
&addr, &port) > 0 &&
!map_addr(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, &addr, port)) {
nf_ct_helper_log(skb, ct, "cannot mangle SIP message");
return NF_DROP;
}
request = 1;
} else
request = 0;
if (nf_ct_protonum(ct) == IPPROTO_TCP)
hdr = SIP_HDR_VIA_TCP;
else
hdr = SIP_HDR_VIA_UDP;
/* Translate topmost Via header and parameters */
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
hdr, NULL, &matchoff, &matchlen,
&addr, &port) > 0) {
unsigned int olen, matchend, poff, plen, buflen, n;
char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
/* We're only interested in headers related to this
* connection */
if (request) {
if (!nf_inet_addr_cmp(&addr,
&ct->tuplehash[dir].tuple.src.u3) ||
port != ct->tuplehash[dir].tuple.src.u.udp.port)
goto next;
} else {
if (!nf_inet_addr_cmp(&addr,
&ct->tuplehash[dir].tuple.dst.u3) ||
port != ct->tuplehash[dir].tuple.dst.u.udp.port)
goto next;
}
olen = *datalen;
if (!map_addr(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, &addr, port)) {
nf_ct_helper_log(skb, ct, "cannot mangle Via header");
return NF_DROP;
}
matchend = matchoff + matchlen + *datalen - olen;
/* The maddr= parameter (RFC 3261) specifies where to send
* the reply. */
if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
"maddr=", &poff, &plen,
&addr, true) > 0 &&
nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) &&
!nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) {
buflen = sip_sprintf_addr(ct, buffer,
&ct->tuplehash[!dir].tuple.dst.u3,
true);
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
poff, plen, buffer, buflen)) {
nf_ct_helper_log(skb, ct, "cannot mangle maddr");
return NF_DROP;
}
}
/* The received= parameter (RFC 3261) contains the address
* from which the server received the request. */
if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
"received=", &poff, &plen,
&addr, false) > 0 &&
nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) &&
!nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) {
buflen = sip_sprintf_addr(ct, buffer,
&ct->tuplehash[!dir].tuple.src.u3,
false);
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
poff, plen, buffer, buflen)) {
nf_ct_helper_log(skb, ct, "cannot mangle received");
return NF_DROP;
}
}
/* The rport= parameter (RFC 3581) contains the port number
* from which the server received the request. */
if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
"rport=", &poff, &plen,
&n) > 0 &&
htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
buflen = sprintf(buffer, "%u", ntohs(p));
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
poff, plen, buffer, buflen)) {
nf_ct_helper_log(skb, ct, "cannot mangle rport");
return NF_DROP;
}
}
}
next:
/* Translate Contact headers */
coff = 0;
in_header = 0;
while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
SIP_HDR_CONTACT, &in_header,
&matchoff, &matchlen,
&addr, &port) > 0) {
if (!map_addr(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen,
&addr, port)) {
nf_ct_helper_log(skb, ct, "cannot mangle contact");
return NF_DROP;
}
}
if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO)) {
nf_ct_helper_log(skb, ct, "cannot mangle SIP from/to");
return NF_DROP;
}
/* Mangle destination port for Cisco phones, then fix up checksums */
if (dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport) {
struct udphdr *uh;
if (skb_ensure_writable(skb, skb->len)) {
nf_ct_helper_log(skb, ct, "cannot mangle packet");
return NF_DROP;
}
uh = (void *)skb->data + protoff;
uh->dest = ct_sip_info->forced_dport;
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff,
0, 0, NULL, 0)) {
nf_ct_helper_log(skb, ct, "cannot mangle packet");
return NF_DROP;
}
}
return NF_ACCEPT;
}
static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
s16 off)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
const struct tcphdr *th;
if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
return;
th = (struct tcphdr *)(skb->data + protoff);
nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
}
/* Handles expected signalling connections and media streams */
static void nf_nat_sip_expected(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
struct nf_conn_help *help = nfct_help(ct->master);
struct nf_conntrack_expect *pair_exp;
int range_set_for_snat = 0;
struct nf_nat_range2 range;
/* This must be a fresh one. */
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* For DST manip, map port here to where it's expected. */
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min_proto = range.max_proto = exp->saved_proto;
range.min_addr = range.max_addr = exp->saved_addr;
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
/* Do media streams SRC manip according with the parameters
* found in the paired expectation.
*/
if (exp->class != SIP_EXPECT_SIGNALLING) {
spin_lock_bh(&nf_conntrack_expect_lock);
hlist_for_each_entry(pair_exp, &help->expectations, lnode) {
if (pair_exp->tuple.src.l3num == nf_ct_l3num(ct) &&
pair_exp->tuple.dst.protonum == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum &&
nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, &pair_exp->saved_addr) &&
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all == pair_exp->saved_proto.all) {
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min_proto.all = range.max_proto.all = pair_exp->tuple.dst.u.all;
range.min_addr = range.max_addr = pair_exp->tuple.dst.u3;
range_set_for_snat = 1;
break;
}
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
/* When no paired expectation has been found, change src to
* where the master sends to, but only if the connection actually came
* from the same source.
*/
if (!range_set_for_snat &&
nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
&ct->master->tuplehash[exp->dir].tuple.src.u3)) {
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
range_set_for_snat = 1;
}
/* Perform SRC manip. */
if (range_set_for_snat)
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
struct nf_conntrack_expect *exp,
unsigned int matchoff,
unsigned int matchlen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
union nf_inet_addr newaddr;
u_int16_t port;
__be16 srcport;
char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
unsigned int buflen;
/* Connection will come from reply */
if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3))
newaddr = exp->tuple.dst.u3;
else
newaddr = ct->tuplehash[!dir].tuple.dst.u3;
/* If the signalling port matches the connection's source port in the
* original direction, try to use the destination port in the opposite
* direction. */
srcport = ct_sip_info->forced_dport ? :
ct->tuplehash[dir].tuple.src.u.udp.port;
if (exp->tuple.dst.u.udp.port == srcport)
port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
else
port = ntohs(exp->tuple.dst.u.udp.port);
exp->saved_addr = exp->tuple.dst.u3;
exp->tuple.dst.u3 = newaddr;
exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
exp->dir = !dir;
exp->expectfn = nf_nat_sip_expected;
port = nf_nat_exp_find_port(exp, port);
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use for SIP");
return NF_DROP;
}
if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) ||
exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port);
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, buffer, buflen)) {
nf_ct_helper_log(skb, ct, "cannot mangle packet");
goto err;
}
}
return NF_ACCEPT;
err:
nf_ct_unexpect_related(exp);
return NF_DROP;
}
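/* Recompute the SDP Content-Length value after the body was mangled: the
* actual length is measured from the "v=" (protocol version) line to the
* end of the message, then written over the current Content-Length header.
*/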
static int mangle_content_len(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchoff, matchlen;
char buffer[sizeof("65536")];
int buflen, c_len;
/* Get actual SDP length */
if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
SDP_HDR_VERSION, SDP_HDR_UNSPEC,
&matchoff, &matchlen) <= 0)
return 0;
c_len = *datalen - matchoff + strlen("v=");
/* Now, update SDP length */
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH,
&matchoff, &matchlen) <= 0)
return 0;
buflen = sprintf(buffer, "%u", c_len);
return mangle_packet(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, buffer, buflen);
}
static int mangle_sdp_packet(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
char *buffer, int buflen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchlen, matchoff;
if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
&matchoff, &matchlen) <= 0)
return -ENOENT;
return mangle_packet(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, buffer, buflen) ? 0 : -EINVAL;
}
static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
char buffer[INET6_ADDRSTRLEN];
unsigned int buflen;
buflen = sip_sprintf_addr(ct, buffer, addr, false);
if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen,
sdpoff, type, term, buffer, buflen))
return 0;
return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
}
static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int matchoff,
unsigned int matchlen,
u_int16_t port)
{
char buffer[sizeof("nnnnn")];
unsigned int buflen;
buflen = sprintf(buffer, "%u", port);
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
matchoff, matchlen, buffer, buflen))
return 0;
return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
}
static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int sdpoff,
const union nf_inet_addr *addr)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
char buffer[INET6_ADDRSTRLEN];
unsigned int buflen;
/* Mangle session description owner and contact addresses */
buflen = sip_sprintf_addr(ct, buffer, addr, false);
if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
SDP_HDR_OWNER, SDP_HDR_MEDIA, buffer, buflen))
return 0;
switch (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
buffer, buflen)) {
case 0:
/*
* RFC 2327:
*
* Session description
*
* c=* (connection information - not required if included in all media)
*/
case -ENOENT:
break;
default:
return 0;
}
return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
}
/* So, this packet has hit the connection tracking matching code.
Mangle it, and change the expectation to match the new version. */
static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff,
const char **dptr, unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp,
unsigned int mediaoff,
unsigned int medialen,
union nf_inet_addr *rtp_addr)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
u_int16_t port;
/* Connection will come from reply */
if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3))
*rtp_addr = rtp_exp->tuple.dst.u3;
else
*rtp_addr = ct->tuplehash[!dir].tuple.dst.u3;
rtp_exp->saved_addr = rtp_exp->tuple.dst.u3;
rtp_exp->tuple.dst.u3 = *rtp_addr;
rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
rtp_exp->dir = !dir;
rtp_exp->expectfn = nf_nat_sip_expected;
rtcp_exp->saved_addr = rtcp_exp->tuple.dst.u3;
rtcp_exp->tuple.dst.u3 = *rtp_addr;
rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
rtcp_exp->dir = !dir;
rtcp_exp->expectfn = nf_nat_sip_expected;
/* Try to get same pair of ports: if not, try to change them. */
for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
port != 0; port += 2) {
int ret;
rtp_exp->tuple.dst.u.udp.port = htons(port);
ret = nf_ct_expect_related(rtp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
port = 0;
break;
}
rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
ret = nf_ct_expect_related(rtcp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (ret == 0)
break;
else if (ret == -EBUSY) {
nf_ct_unexpect_related(rtp_exp);
continue;
} else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
port = 0;
break;
}
}
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use for SDP media");
goto err1;
}
/* Update media port. */
if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
!nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
mediaoff, medialen, port)) {
nf_ct_helper_log(skb, ct, "cannot mangle SDP message");
goto err2;
}
return NF_ACCEPT;
err2:
nf_ct_unexpect_related(rtp_exp);
nf_ct_unexpect_related(rtcp_exp);
err1:
return NF_DROP;
}
static struct nf_ct_helper_expectfn sip_nat = {
.name = "sip",
.expectfn = nf_nat_sip_expected,
};
static void __exit nf_nat_sip_fini(void)
{
nf_nat_helper_unregister(&nat_helper_sip);
RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
nf_ct_helper_expectfn_unregister(&sip_nat);
synchronize_rcu();
}
static const struct nf_nat_sip_hooks sip_hooks = {
.msg = nf_nat_sip,
.seq_adjust = nf_nat_sip_seq_adjust,
.expect = nf_nat_sip_expect,
.sdp_addr = nf_nat_sdp_addr,
.sdp_port = nf_nat_sdp_port,
.sdp_session = nf_nat_sdp_session,
.sdp_media = nf_nat_sdp_media,
};
static int __init nf_nat_sip_init(void)
{
BUG_ON(nf_nat_sip_hooks != NULL);
nf_nat_helper_register(&nat_helper_sip);
RCU_INIT_POINTER(nf_nat_sip_hooks, &sip_hooks);
nf_ct_helper_expectfn_register(&sip_nat);
return 0;
}
module_init(nf_nat_sip_init);
module_exit(nf_nat_sip_fini);
| linux-master | net/netfilter/nf_nat_sip.c |
// SPDX-License-Identifier: GPL-2.0-only
/* String matching match for iptables
*
* (C) 2005 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_string.h>
#include <linux/textsearch.h>
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("Xtables: string-based matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_string");
MODULE_ALIAS("ip6t_string");
MODULE_ALIAS("ebt_string");
static bool
string_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_string_info *conf = par->matchinfo;
bool invert;
invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
conf->to_offset, conf->config)
!= UINT_MAX) ^ invert;
}
#define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m))
static int string_mt_check(const struct xt_mtchk_param *par)
{
struct xt_string_info *conf = par->matchinfo;
struct ts_config *ts_conf;
int flags = TS_AUTOLOAD;
/* Damn, can't handle this case properly with iptables... */
if (conf->from_offset > conf->to_offset)
return -EINVAL;
if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
return -EINVAL;
if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
return -EINVAL;
if (conf->u.v1.flags &
~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
return -EINVAL;
if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
flags |= TS_IGNORECASE;
ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
GFP_KERNEL, flags);
if (IS_ERR(ts_conf))
return PTR_ERR(ts_conf);
conf->config = ts_conf;
return 0;
}
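/* Example usage (a sketch with the standard iptables string match; the
* rule itself is illustrative):
*
*	iptables -A INPUT -p tcp --dport 80 \
*		-m string --algo bm --string "GET /secret" -j DROP
*
* The algorithm name ("bm", "kmp") arrives in conf->algo and is resolved
* by textsearch_prepare(); TS_AUTOLOAD lets it load the corresponding
* textsearch module on demand.
*/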
static void string_mt_destroy(const struct xt_mtdtor_param *par)
{
textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config);
}
static struct xt_match xt_string_mt_reg __read_mostly = {
.name = "string",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = string_mt_check,
.match = string_mt,
.destroy = string_mt_destroy,
.matchsize = sizeof(struct xt_string_info),
.usersize = offsetof(struct xt_string_info, config),
.me = THIS_MODULE,
};
static int __init string_mt_init(void)
{
return xt_register_match(&xt_string_mt_reg);
}
static void __exit string_mt_exit(void)
{
xt_unregister_match(&xt_string_mt_reg);
}
module_init(string_mt_init);
module_exit(string_mt_exit);
| linux-master | net/netfilter/xt_string.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007
*
* Copyright (c) 2006 Jing Min Zhao <[email protected]>
*/
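/* Reading aid (an assumption based on the field_t definition used by the
* H.323 ASN.1 decoder, not part of the generated output): each entry lists
* { name, type, size, lower bound, upper bound, attributes, offset,
* subfields }. DECODE entries store the parsed value at the given offset
* in the destination structure; SKIP entries are parsed and discarded.
*/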
static const struct field_t _TransportAddress_ipAddress[] = { /* SEQUENCE */
{FNAME("ip") OCTSTR, FIXD, 4, 0, DECODE,
offsetof(TransportAddress_ipAddress, ip), NULL},
{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _TransportAddress_ipSourceRoute_route[] = { /* SEQUENCE OF */
{FNAME("item") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
};
static const struct field_t _TransportAddress_ipSourceRoute_routing[] = { /* CHOICE */
{FNAME("strict") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("loose") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _TransportAddress_ipSourceRoute[] = { /* SEQUENCE */
{FNAME("ip") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("route") SEQOF, SEMI, 0, 0, SKIP, 0,
_TransportAddress_ipSourceRoute_route},
{FNAME("routing") CHOICE, 1, 2, 2, SKIP | EXT, 0,
_TransportAddress_ipSourceRoute_routing},
};
static const struct field_t _TransportAddress_ipxAddress[] = { /* SEQUENCE */
{FNAME("node") OCTSTR, FIXD, 6, 0, SKIP, 0, NULL},
{FNAME("netnum") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
{FNAME("port") OCTSTR, FIXD, 2, 0, SKIP, 0, NULL},
};
static const struct field_t _TransportAddress_ip6Address[] = { /* SEQUENCE */
{FNAME("ip") OCTSTR, FIXD, 16, 0, DECODE,
offsetof(TransportAddress_ip6Address, ip), NULL},
{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H221NonStandard[] = { /* SEQUENCE */
{FNAME("t35CountryCode") INT, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("t35Extension") INT, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("manufacturerCode") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _NonStandardIdentifier[] = { /* CHOICE */
{FNAME("object") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("h221NonStandard") SEQ, 0, 3, 3, SKIP | EXT, 0,
_H221NonStandard},
};
static const struct field_t _NonStandardParameter[] = { /* SEQUENCE */
{FNAME("nonStandardIdentifier") CHOICE, 1, 2, 2, SKIP | EXT, 0,
_NonStandardIdentifier},
{FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _TransportAddress[] = { /* CHOICE */
{FNAME("ipAddress") SEQ, 0, 2, 2, DECODE,
offsetof(TransportAddress, ipAddress), _TransportAddress_ipAddress},
{FNAME("ipSourceRoute") SEQ, 0, 4, 4, SKIP | EXT, 0,
_TransportAddress_ipSourceRoute},
{FNAME("ipxAddress") SEQ, 0, 3, 3, SKIP, 0,
_TransportAddress_ipxAddress},
{FNAME("ip6Address") SEQ, 0, 2, 2, DECODE | EXT,
offsetof(TransportAddress, ip6Address),
_TransportAddress_ip6Address},
{FNAME("netBios") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
{FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0,
_NonStandardParameter},
};
static const struct field_t _AliasAddress[] = { /* CHOICE */
{FNAME("dialedDigits") NUMDGT, 7, 1, 0, SKIP, 0, NULL},
{FNAME("h323-ID") BMPSTR, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("url-ID") IA5STR, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("transportID") CHOICE, 3, 7, 7, SKIP | EXT, 0, NULL},
{FNAME("email-ID") IA5STR, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("partyNumber") CHOICE, 3, 5, 5, SKIP | EXT, 0, NULL},
{FNAME("mobileUIM") CHOICE, 1, 2, 2, SKIP | EXT, 0, NULL},
};
static const struct field_t _Setup_UUIE_sourceAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _VendorIdentifier[] = { /* SEQUENCE */
{FNAME("vendor") SEQ, 0, 3, 3, SKIP | EXT, 0, _H221NonStandard},
{FNAME("productId") OCTSTR, BYTE, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("versionId") OCTSTR, BYTE, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _GatekeeperInfo[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
};
static const struct field_t _H310Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H320Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H321Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H322Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H323Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H324Caps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _VoiceCaps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _T120OnlyCaps[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _SupportedProtocols[] = { /* CHOICE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP, 0,
_NonStandardParameter},
{FNAME("h310") SEQ, 1, 1, 3, SKIP | EXT, 0, _H310Caps},
{FNAME("h320") SEQ, 1, 1, 3, SKIP | EXT, 0, _H320Caps},
{FNAME("h321") SEQ, 1, 1, 3, SKIP | EXT, 0, _H321Caps},
{FNAME("h322") SEQ, 1, 1, 3, SKIP | EXT, 0, _H322Caps},
{FNAME("h323") SEQ, 1, 1, 3, SKIP | EXT, 0, _H323Caps},
{FNAME("h324") SEQ, 1, 1, 3, SKIP | EXT, 0, _H324Caps},
{FNAME("voice") SEQ, 1, 1, 3, SKIP | EXT, 0, _VoiceCaps},
{FNAME("t120-only") SEQ, 1, 1, 3, SKIP | EXT, 0, _T120OnlyCaps},
{FNAME("nonStandardProtocol") SEQ, 2, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("t38FaxAnnexbOnly") SEQ, 2, 5, 5, SKIP | EXT, 0, NULL},
};
static const struct field_t _GatewayInfo_protocol[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 4, 9, 11, SKIP | EXT, 0, _SupportedProtocols},
};
static const struct field_t _GatewayInfo[] = { /* SEQUENCE */
{FNAME("protocol") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_GatewayInfo_protocol},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
};
static const struct field_t _McuInfo[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("protocol") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _TerminalInfo[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
};
static const struct field_t _EndpointType[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("vendor") SEQ, 2, 3, 3, SKIP | EXT | OPT, 0,
_VendorIdentifier},
{FNAME("gatekeeper") SEQ, 1, 1, 1, SKIP | EXT | OPT, 0,
_GatekeeperInfo},
{FNAME("gateway") SEQ, 2, 2, 2, SKIP | EXT | OPT, 0, _GatewayInfo},
{FNAME("mcu") SEQ, 1, 1, 2, SKIP | EXT | OPT, 0, _McuInfo},
{FNAME("terminal") SEQ, 1, 1, 1, SKIP | EXT | OPT, 0, _TerminalInfo},
{FNAME("mc") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("undefinedNode") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("set") BITSTR, FIXD, 32, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedTunnelledProtocols") SEQOF, SEMI, 0, 0, SKIP | OPT,
0, NULL},
};
static const struct field_t _Setup_UUIE_destinationAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _Setup_UUIE_destExtraCallInfo[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _Setup_UUIE_destExtraCRV[] = { /* SEQUENCE OF */
{FNAME("item") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _Setup_UUIE_conferenceGoal[] = { /* CHOICE */
{FNAME("create") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("join") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("invite") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("capability-negotiation") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("callIndependentSupplementaryService") NUL, FIXD, 0, 0, SKIP,
0, NULL},
};
static const struct field_t _Q954Details[] = { /* SEQUENCE */
{FNAME("conferenceCalling") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("threePartyService") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _QseriesOptions[] = { /* SEQUENCE */
{FNAME("q932Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q951Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q952Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q953Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q955Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q956Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q957Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("q954Info") SEQ, 0, 2, 2, SKIP | EXT, 0, _Q954Details},
};
static const struct field_t _CallType[] = { /* CHOICE */
{FNAME("pointToPoint") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("oneToN") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("nToOne") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("nToN") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H245_NonStandardIdentifier_h221NonStandard[] = { /* SEQUENCE */
{FNAME("t35CountryCode") INT, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("t35Extension") INT, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("manufacturerCode") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H245_NonStandardIdentifier[] = { /* CHOICE */
{FNAME("object") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("h221NonStandard") SEQ, 0, 3, 3, SKIP, 0,
_H245_NonStandardIdentifier_h221NonStandard},
};
static const struct field_t _H245_NonStandardParameter[] = { /* SEQUENCE */
{FNAME("nonStandardIdentifier") CHOICE, 1, 2, 2, SKIP, 0,
_H245_NonStandardIdentifier},
{FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H261VideoCapability[] = { /* SEQUENCE */
{FNAME("qcifMPI") INT, 2, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("cifMPI") INT, 2, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("temporalSpatialTradeOffCapability") BOOL, FIXD, 0, 0, SKIP, 0,
NULL},
{FNAME("maxBitRate") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("stillImageTransmission") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H262VideoCapability[] = { /* SEQUENCE */
{FNAME("profileAndLevel-SPatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-MPatLL") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-MPatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-MPatH-14") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-MPatHL") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-SNRatLL") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-SNRatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-SpatialatH-14") BOOL, FIXD, 0, 0, SKIP, 0,
NULL},
{FNAME("profileAndLevel-HPatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-HPatH-14") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("profileAndLevel-HPatHL") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("videoBitRate") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("vbvBufferSize") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("samplesPerLine") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("linesPerFrame") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("framesPerSecond") INT, 4, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("luminanceSampleRate") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H263VideoCapability[] = { /* SEQUENCE */
{FNAME("sqcifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("qcifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("cifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("cif4MPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("cif16MPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("maxBitRate") INT, CONS, 1, 0, SKIP, 0, NULL},
{FNAME("unrestrictedVector") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("arithmeticCoding") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("advancedPrediction") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("pbFrames") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("temporalSpatialTradeOffCapability") BOOL, FIXD, 0, 0, SKIP, 0,
NULL},
{FNAME("hrd-B") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("bppMaxKb") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("slowSqcifMPI") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("slowQcifMPI") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("slowCifMPI") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("slowCif4MPI") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("slowCif16MPI") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("errorCompensation") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("enhancementLayerInfo") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0,
NULL},
{FNAME("h263Options") SEQ, 5, 29, 31, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _IS11172VideoCapability[] = { /* SEQUENCE */
{FNAME("constrainedBitstream") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("videoBitRate") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("vbvBufferSize") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("samplesPerLine") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("linesPerFrame") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("pictureRate") INT, 4, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("luminanceSampleRate") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _VideoCapability[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("h261VideoCapability") SEQ, 2, 5, 6, SKIP | EXT, 0,
_H261VideoCapability},
{FNAME("h262VideoCapability") SEQ, 6, 17, 18, SKIP | EXT, 0,
_H262VideoCapability},
{FNAME("h263VideoCapability") SEQ, 7, 13, 21, SKIP | EXT, 0,
_H263VideoCapability},
{FNAME("is11172VideoCapability") SEQ, 6, 7, 8, SKIP | EXT, 0,
_IS11172VideoCapability},
{FNAME("genericVideoCapability") SEQ, 5, 6, 6, SKIP | EXT, 0, NULL},
};
static const struct field_t _AudioCapability_g7231[] = { /* SEQUENCE */
{FNAME("maxAl-sduAudioFrames") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("silenceSuppression") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _IS11172AudioCapability[] = { /* SEQUENCE */
{FNAME("audioLayer1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioLayer2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioLayer3") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling32k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling44k1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling48k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("singleChannel") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("twoChannels") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("bitRate") INT, WORD, 1, 0, SKIP, 0, NULL},
};
static const struct field_t _IS13818AudioCapability[] = { /* SEQUENCE */
{FNAME("audioLayer1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioLayer2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioLayer3") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling16k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling22k05") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling24k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling32k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling44k1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("audioSampling48k") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("singleChannel") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("twoChannels") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("threeChannels2-1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("threeChannels3-0") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fourChannels2-0-2-0") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fourChannels2-2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fourChannels3-1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fiveChannels3-0-2-0") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fiveChannels3-2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("lowFrequencyEnhancement") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("multilingual") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("bitRate") INT, WORD, 1, 0, SKIP, 0, NULL},
};
static const struct field_t _AudioCapability[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("g711Alaw64k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g711Alaw56k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g711Ulaw64k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g711Ulaw56k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g722-64k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g722-56k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g722-48k") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g7231") SEQ, 0, 2, 2, SKIP, 0, _AudioCapability_g7231},
{FNAME("g728") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g729") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g729AnnexA") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("is11172AudioCapability") SEQ, 0, 9, 9, SKIP | EXT, 0,
_IS11172AudioCapability},
{FNAME("is13818AudioCapability") SEQ, 0, 21, 21, SKIP | EXT, 0,
_IS13818AudioCapability},
{FNAME("g729wAnnexB") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g729AnnexAwAnnexB") INT, BYTE, 1, 0, SKIP, 0, NULL},
{FNAME("g7231AnnexCCapability") SEQ, 1, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("gsmFullRate") SEQ, 0, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("gsmHalfRate") SEQ, 0, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("gsmEnhancedFullRate") SEQ, 0, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("genericAudioCapability") SEQ, 5, 6, 6, SKIP | EXT, 0, NULL},
{FNAME("g729Extensions") SEQ, 1, 8, 8, SKIP | EXT, 0, NULL},
};
static const struct field_t _DataProtocolCapability[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("v14buffered") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("v42lapm") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("hdlcFrameTunnelling") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("h310SeparateVCStack") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("h310SingleVCStack") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("transparent") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("segmentationAndReassembly") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("hdlcFrameTunnelingwSAR") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("v120") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("separateLANStack") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("v76wCompression") CHOICE, 2, 3, 3, SKIP | EXT, 0, NULL},
{FNAME("tcp") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("udp") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _T84Profile_t84Restricted[] = { /* SEQUENCE */
{FNAME("qcif") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("cif") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("ccir601Seq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("ccir601Prog") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("hdtvSeq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("hdtvProg") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("g3FacsMH200x100") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("g3FacsMH200x200") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("g4FacsMMR200x100") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("g4FacsMMR200x200") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("jbig200x200Seq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("jbig200x200Prog") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("jbig300x300Seq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("jbig300x300Prog") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("digPhotoLow") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("digPhotoMedSeq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("digPhotoMedProg") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("digPhotoHighSeq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("digPhotoHighProg") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _T84Profile[] = { /* CHOICE */
{FNAME("t84Unrestricted") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("t84Restricted") SEQ, 0, 19, 19, SKIP | EXT, 0,
_T84Profile_t84Restricted},
};
static const struct field_t _DataApplicationCapability_application_t84[] = { /* SEQUENCE */
{FNAME("t84Protocol") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("t84Profile") CHOICE, 1, 2, 2, SKIP, 0, _T84Profile},
};
static const struct field_t _DataApplicationCapability_application_nlpid[] = { /* SEQUENCE */
{FNAME("nlpidProtocol") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("nlpidData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _DataApplicationCapability_application[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("t120") CHOICE, 3, 7, 14, DECODE | EXT,
offsetof(DataApplicationCapability_application, t120),
_DataProtocolCapability},
{FNAME("dsm-cc") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("userData") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("t84") SEQ, 0, 2, 2, SKIP, 0,
_DataApplicationCapability_application_t84},
{FNAME("t434") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("h224") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("nlpid") SEQ, 0, 2, 2, SKIP, 0,
_DataApplicationCapability_application_nlpid},
{FNAME("dsvdControl") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("h222DataPartitioning") CHOICE, 3, 7, 14, SKIP | EXT, 0,
_DataProtocolCapability},
{FNAME("t30fax") CHOICE, 3, 7, 14, SKIP | EXT, 0, NULL},
{FNAME("t140") CHOICE, 3, 7, 14, SKIP | EXT, 0, NULL},
{FNAME("t38fax") SEQ, 0, 2, 2, SKIP, 0, NULL},
{FNAME("genericDataCapability") SEQ, 5, 6, 6, SKIP | EXT, 0, NULL},
};
static const struct field_t _DataApplicationCapability[] = { /* SEQUENCE */
{FNAME("application") CHOICE, 4, 10, 14, DECODE | EXT,
offsetof(DataApplicationCapability, application),
_DataApplicationCapability_application},
{FNAME("maxBitRate") INT, CONS, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _EncryptionMode[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("h233Encryption") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _DataType[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("nullData") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("videoData") CHOICE, 3, 5, 6, SKIP | EXT, 0, _VideoCapability},
{FNAME("audioData") CHOICE, 4, 14, 22, SKIP | EXT, 0,
_AudioCapability},
{FNAME("data") SEQ, 0, 2, 2, DECODE | EXT, offsetof(DataType, data),
_DataApplicationCapability},
{FNAME("encryptionData") CHOICE, 1, 2, 2, SKIP | EXT, 0,
_EncryptionMode},
{FNAME("h235Control") SEQ, 0, 2, 2, SKIP, 0, NULL},
{FNAME("h235Media") SEQ, 0, 2, 2, SKIP | EXT, 0, NULL},
{FNAME("multiplexedStream") SEQ, 0, 2, 2, SKIP | EXT, 0, NULL},
};
static const struct field_t _H222LogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("resourceID") INT, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("subChannelID") INT, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("pcr-pid") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("programDescriptors") OCTSTR, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("streamDescriptors") OCTSTR, SEMI, 0, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _H223LogicalChannelParameters_adaptationLayerType_al3[] = { /* SEQUENCE */
{FNAME("controlFieldOctets") INT, 2, 0, 0, SKIP, 0, NULL},
{FNAME("sendBufferSize") INT, CONS, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H223LogicalChannelParameters_adaptationLayerType[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
_H245_NonStandardParameter},
{FNAME("al1Framed") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("al1NotFramed") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("al2WithoutSequenceNumbers") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("al2WithSequenceNumbers") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("al3") SEQ, 0, 2, 2, SKIP, 0,
_H223LogicalChannelParameters_adaptationLayerType_al3},
{FNAME("al1M") SEQ, 0, 7, 8, SKIP | EXT, 0, NULL},
{FNAME("al2M") SEQ, 0, 2, 2, SKIP | EXT, 0, NULL},
{FNAME("al3M") SEQ, 0, 5, 6, SKIP | EXT, 0, NULL},
};
static const struct field_t _H223LogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("adaptationLayerType") CHOICE, 3, 6, 9, SKIP | EXT, 0,
_H223LogicalChannelParameters_adaptationLayerType},
{FNAME("segmentableFlag") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CRCLength[] = { /* CHOICE */
{FNAME("crc8bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("crc16bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("crc32bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V76HDLCParameters[] = { /* SEQUENCE */
{FNAME("crcLength") CHOICE, 2, 3, 3, SKIP | EXT, 0, _CRCLength},
{FNAME("n401") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("loopbackTestProcedure") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V76LogicalChannelParameters_suspendResume[] = { /* CHOICE */
{FNAME("noSuspendResume") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("suspendResumewAddress") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("suspendResumewoAddress") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V76LogicalChannelParameters_mode_eRM_recovery[] = { /* CHOICE */
{FNAME("rej") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("sREJ") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("mSREJ") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V76LogicalChannelParameters_mode_eRM[] = { /* SEQUENCE */
{FNAME("windowSize") INT, 7, 1, 0, SKIP, 0, NULL},
{FNAME("recovery") CHOICE, 2, 3, 3, SKIP | EXT, 0,
_V76LogicalChannelParameters_mode_eRM_recovery},
};
static const struct field_t _V76LogicalChannelParameters_mode[] = { /* CHOICE */
{FNAME("eRM") SEQ, 0, 2, 2, SKIP | EXT, 0,
_V76LogicalChannelParameters_mode_eRM},
{FNAME("uNERM") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V75Parameters[] = { /* SEQUENCE */
{FNAME("audioHeaderPresent") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _V76LogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("hdlcParameters") SEQ, 0, 3, 3, SKIP | EXT, 0,
_V76HDLCParameters},
{FNAME("suspendResume") CHOICE, 2, 3, 3, SKIP | EXT, 0,
_V76LogicalChannelParameters_suspendResume},
{FNAME("uIH") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("mode") CHOICE, 1, 2, 2, SKIP | EXT, 0,
_V76LogicalChannelParameters_mode},
{FNAME("v75Parameters") SEQ, 0, 1, 1, SKIP | EXT, 0, _V75Parameters},
};
static const struct field_t _H2250LogicalChannelParameters_nonStandard[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 0, 2, 2, SKIP, 0, _H245_NonStandardParameter},
};
static const struct field_t _UnicastAddress_iPAddress[] = { /* SEQUENCE */
{FNAME("network") OCTSTR, FIXD, 4, 0, DECODE,
offsetof(UnicastAddress_iPAddress, network), NULL},
{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _UnicastAddress_iPXAddress[] = { /* SEQUENCE */
{FNAME("node") OCTSTR, FIXD, 6, 0, SKIP, 0, NULL},
{FNAME("netnum") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
{FNAME("tsapIdentifier") OCTSTR, FIXD, 2, 0, SKIP, 0, NULL},
};
static const struct field_t _UnicastAddress_iP6Address[] = { /* SEQUENCE */
{FNAME("network") OCTSTR, FIXD, 16, 0, DECODE,
offsetof(UnicastAddress_iP6Address, network), NULL},
{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _UnicastAddress_iPSourceRouteAddress_routing[] = { /* CHOICE */
{FNAME("strict") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("loose") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _UnicastAddress_iPSourceRouteAddress_route[] = { /* SEQUENCE OF */
{FNAME("item") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
};
static const struct field_t _UnicastAddress_iPSourceRouteAddress[] = { /* SEQUENCE */
{FNAME("routing") CHOICE, 1, 2, 2, SKIP, 0,
_UnicastAddress_iPSourceRouteAddress_routing},
{FNAME("network") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("route") SEQOF, SEMI, 0, 0, SKIP, 0,
_UnicastAddress_iPSourceRouteAddress_route},
};
static const struct field_t _UnicastAddress[] = { /* CHOICE */
{FNAME("iPAddress") SEQ, 0, 2, 2, DECODE | EXT,
offsetof(UnicastAddress, iPAddress), _UnicastAddress_iPAddress},
{FNAME("iPXAddress") SEQ, 0, 3, 3, SKIP | EXT, 0,
_UnicastAddress_iPXAddress},
{FNAME("iP6Address") SEQ, 0, 2, 2, DECODE | EXT,
offsetof(UnicastAddress, iP6Address), _UnicastAddress_iP6Address},
{FNAME("netBios") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
{FNAME("iPSourceRouteAddress") SEQ, 0, 4, 4, SKIP | EXT, 0,
_UnicastAddress_iPSourceRouteAddress},
{FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, NULL},
};
static const struct field_t _MulticastAddress_iPAddress[] = { /* SEQUENCE */
{FNAME("network") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _MulticastAddress_iP6Address[] = { /* SEQUENCE */
{FNAME("network") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _MulticastAddress[] = { /* CHOICE */
{FNAME("iPAddress") SEQ, 0, 2, 2, SKIP | EXT, 0,
_MulticastAddress_iPAddress},
{FNAME("iP6Address") SEQ, 0, 2, 2, SKIP | EXT, 0,
_MulticastAddress_iP6Address},
{FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, NULL},
};
static const struct field_t _H245_TransportAddress[] = { /* CHOICE */
{FNAME("unicastAddress") CHOICE, 3, 5, 7, DECODE | EXT,
offsetof(H245_TransportAddress, unicastAddress), _UnicastAddress},
{FNAME("multicastAddress") CHOICE, 1, 2, 4, SKIP | EXT, 0,
_MulticastAddress},
};
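/*
 * Of the transport address alternatives above, only the IPv4 and
 * IPv6 unicast forms carry DECODE; every other alternative is parsed
 * and discarded via SKIP.  The decoded "network" fields are what the
 * conntrack/NAT helper later inspects and, where necessary, rewrites.
 */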
static const struct field_t _H2250LogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("nonStandard") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_H2250LogicalChannelParameters_nonStandard},
{FNAME("sessionID") INT, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("associatedSessionID") INT, 8, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("mediaChannel") CHOICE, 1, 2, 2, DECODE | EXT | OPT,
offsetof(H2250LogicalChannelParameters, mediaChannel),
_H245_TransportAddress},
{FNAME("mediaGuaranteedDelivery") BOOL, FIXD, 0, 0, SKIP | OPT, 0,
NULL},
{FNAME("mediaControlChannel") CHOICE, 1, 2, 2, DECODE | EXT | OPT,
offsetof(H2250LogicalChannelParameters, mediaControlChannel),
_H245_TransportAddress},
{FNAME("mediaControlGuaranteedDelivery") BOOL, FIXD, 0, 0, STOP | OPT,
0, NULL},
{FNAME("silenceSuppression") BOOL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destination") SEQ, 0, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("dynamicRTPPayloadType") INT, 5, 96, 0, STOP | OPT, 0, NULL},
{FNAME("mediaPacketization") CHOICE, 0, 1, 2, STOP | EXT | OPT, 0,
NULL},
{FNAME("transportCapability") SEQ, 3, 3, 3, STOP | EXT | OPT, 0,
NULL},
{FNAME("redundancyEncoding") SEQ, 1, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("source") SEQ, 0, 2, 2, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
{FNAME("h222LogicalChannelParameters") SEQ, 3, 5, 5, SKIP | EXT, 0,
_H222LogicalChannelParameters},
{FNAME("h223LogicalChannelParameters") SEQ, 0, 2, 2, SKIP | EXT, 0,
_H223LogicalChannelParameters},
{FNAME("v76LogicalChannelParameters") SEQ, 0, 5, 5, SKIP | EXT, 0,
_V76LogicalChannelParameters},
{FNAME("h2250LogicalChannelParameters") SEQ, 10, 11, 14, DECODE | EXT,
offsetof
(OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters,
h2250LogicalChannelParameters), _H2250LogicalChannelParameters},
{FNAME("none") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _OpenLogicalChannel_forwardLogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("dataType") CHOICE, 3, 6, 9, DECODE | EXT,
offsetof(OpenLogicalChannel_forwardLogicalChannelParameters,
dataType), _DataType},
{FNAME("multiplexParameters") CHOICE, 2, 3, 5, DECODE | EXT,
offsetof(OpenLogicalChannel_forwardLogicalChannelParameters,
multiplexParameters),
_OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters},
{FNAME("forwardLogicalChannelDependency") INT, WORD, 1, 0, SKIP | OPT,
0, NULL},
{FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
{FNAME("h223LogicalChannelParameters") SEQ, 0, 2, 2, SKIP | EXT, 0,
_H223LogicalChannelParameters},
{FNAME("v76LogicalChannelParameters") SEQ, 0, 5, 5, SKIP | EXT, 0,
_V76LogicalChannelParameters},
{FNAME("h2250LogicalChannelParameters") SEQ, 10, 11, 14, DECODE | EXT,
offsetof
(OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters,
h2250LogicalChannelParameters), _H2250LogicalChannelParameters},
};
static const struct field_t _OpenLogicalChannel_reverseLogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("dataType") CHOICE, 3, 6, 9, SKIP | EXT, 0, _DataType},
{FNAME("multiplexParameters") CHOICE, 1, 2, 3, DECODE | EXT | OPT,
offsetof(OpenLogicalChannel_reverseLogicalChannelParameters,
multiplexParameters),
_OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters},
{FNAME("reverseLogicalChannelDependency") INT, WORD, 1, 0, SKIP | OPT,
0, NULL},
{FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _NetworkAccessParameters_distribution[] = { /* CHOICE */
{FNAME("unicast") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("multicast") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _Q2931Address_address[] = { /* CHOICE */
{FNAME("internationalNumber") NUMSTR, 4, 1, 0, SKIP, 0, NULL},
{FNAME("nsapAddress") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
};
static const struct field_t _Q2931Address[] = { /* SEQUENCE */
{FNAME("address") CHOICE, 1, 2, 2, SKIP | EXT, 0,
_Q2931Address_address},
{FNAME("subaddress") OCTSTR, 5, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _NetworkAccessParameters_networkAddress[] = { /* CHOICE */
{FNAME("q2931Address") SEQ, 1, 2, 2, SKIP | EXT, 0, _Q2931Address},
{FNAME("e164Address") NUMDGT, 7, 1, 0, SKIP, 0, NULL},
{FNAME("localAreaAddress") CHOICE, 1, 2, 2, DECODE | EXT,
offsetof(NetworkAccessParameters_networkAddress, localAreaAddress),
_H245_TransportAddress},
};
static const struct field_t _NetworkAccessParameters[] = { /* SEQUENCE */
{FNAME("distribution") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0,
_NetworkAccessParameters_distribution},
{FNAME("networkAddress") CHOICE, 2, 3, 3, DECODE | EXT,
offsetof(NetworkAccessParameters, networkAddress),
_NetworkAccessParameters_networkAddress},
{FNAME("associateConference") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("externalReference") OCTSTR, 8, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("t120SetupProcedure") CHOICE, 2, 3, 3, SKIP | EXT | OPT, 0,
NULL},
};
static const struct field_t _OpenLogicalChannel[] = { /* SEQUENCE */
{FNAME("forwardLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("forwardLogicalChannelParameters") SEQ, 1, 3, 5, DECODE | EXT,
offsetof(OpenLogicalChannel, forwardLogicalChannelParameters),
_OpenLogicalChannel_forwardLogicalChannelParameters},
{FNAME("reverseLogicalChannelParameters") SEQ, 1, 2, 4,
DECODE | EXT | OPT, offsetof(OpenLogicalChannel,
reverseLogicalChannelParameters),
_OpenLogicalChannel_reverseLogicalChannelParameters},
{FNAME("separateStack") SEQ, 2, 4, 5, DECODE | EXT | OPT,
offsetof(OpenLogicalChannel, separateStack),
_NetworkAccessParameters},
{FNAME("encryptionSync") SEQ, 2, 4, 4, STOP | EXT | OPT, 0, NULL},
};
static const struct field_t _Setup_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
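/*
 * In SEQUENCE OF item entries such as the one above, the slot that
 * normally holds an offsetof() carries sizeof(OpenLogicalChannel)
 * instead; the decoder appears to use it as the array stride while
 * filling consecutive elements, with the ub of the referencing
 * fastStart field (30) bounding how many items are kept.
 */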
static const struct field_t _Setup_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Setup_UUIE, h245Address), _TransportAddress},
{FNAME("sourceAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Setup_UUIE_sourceAddress},
{FNAME("sourceInfo") SEQ, 6, 8, 10, SKIP | EXT, 0, _EndpointType},
{FNAME("destinationAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Setup_UUIE_destinationAddress},
{FNAME("destCallSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Setup_UUIE, destCallSignalAddress), _TransportAddress},
{FNAME("destExtraCallInfo") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Setup_UUIE_destExtraCallInfo},
{FNAME("destExtraCRV") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Setup_UUIE_destExtraCRV},
{FNAME("activeMC") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("conferenceID") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
{FNAME("conferenceGoal") CHOICE, 2, 3, 5, SKIP | EXT, 0,
_Setup_UUIE_conferenceGoal},
{FNAME("callServices") SEQ, 0, 8, 8, SKIP | EXT | OPT, 0,
_QseriesOptions},
{FNAME("callType") CHOICE, 2, 4, 4, SKIP | EXT, 0, _CallType},
{FNAME("sourceCallSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Setup_UUIE, sourceCallSignalAddress), _TransportAddress},
{FNAME("remoteExtensionAddress") CHOICE, 1, 2, 7, SKIP | EXT | OPT, 0,
NULL},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("h245SecurityCapability") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(Setup_UUIE, fastStart), _Setup_UUIE_fastStart},
{FNAME("mediaWaitForConnect") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("canOverlapSend") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("connectionParameters") SEQ, 0, 3, 3, SKIP | EXT | OPT, 0,
NULL},
{FNAME("language") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("presentationIndicator") CHOICE, 2, 3, 3, SKIP | EXT | OPT, 0,
NULL},
{FNAME("screeningIndicator") ENUM, 2, 0, 0, SKIP | EXT | OPT, 0,
NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("symmetricOperationRequired") NUL, FIXD, 0, 0, SKIP | OPT, 0,
NULL},
{FNAME("capacity") SEQ, 2, 2, 2, SKIP | EXT | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, SKIP | EXT | OPT, 0, NULL},
{FNAME("desiredProtocols") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("neededFeatures") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("desiredFeatures") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("supportedFeatures") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("parallelH245Control") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("additionalSourceAddresses") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
NULL},
};
static const struct field_t _CallProceeding_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
static const struct field_t _CallProceeding_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
_EndpointType},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(CallProceeding_UUIE, h245Address), _TransportAddress},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("h245SecurityMode") CHOICE, 2, 4, 4, SKIP | EXT | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(CallProceeding_UUIE, fastStart),
_CallProceeding_UUIE_fastStart},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _Connect_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
static const struct field_t _Connect_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Connect_UUIE, h245Address), _TransportAddress},
{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
_EndpointType},
{FNAME("conferenceID") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("h245SecurityMode") CHOICE, 2, 4, 4, SKIP | EXT | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(Connect_UUIE, fastStart), _Connect_UUIE_fastStart},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("language") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("connectedAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("presentationIndicator") CHOICE, 2, 3, 3, SKIP | EXT | OPT, 0,
NULL},
{FNAME("screeningIndicator") ENUM, 2, 0, 0, SKIP | EXT | OPT, 0,
NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("capacity") SEQ, 2, 2, 2, SKIP | EXT | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _Alerting_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
static const struct field_t _Alerting_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
_EndpointType},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Alerting_UUIE, h245Address), _TransportAddress},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("h245SecurityMode") CHOICE, 2, 4, 4, SKIP | EXT | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(Alerting_UUIE, fastStart), _Alerting_UUIE_fastStart},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("alertingAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("presentationIndicator") CHOICE, 2, 3, 3, SKIP | EXT | OPT, 0,
NULL},
{FNAME("screeningIndicator") ENUM, 2, 0, 0, SKIP | EXT | OPT, 0,
NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("capacity") SEQ, 2, 2, 2, SKIP | EXT | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _Information_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, SKIP | OPT, 0, NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _ReleaseCompleteReason[] = { /* CHOICE */
{FNAME("noBandwidth") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("gatekeeperResources") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("unreachableDestination") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("destinationRejection") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("invalidRevision") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("noPermission") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("unreachableGatekeeper") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("gatewayResources") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("badFormatAddress") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("adaptiveBusy") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("inConf") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("undefinedReason") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("facilityCallDeflection") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("securityDenied") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("calledPartyNotRegistered") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("callerNotRegistered") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("newConnectionNeeded") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("nonStandardReason") SEQ, 0, 2, 2, SKIP, 0, NULL},
{FNAME("replaceWithConferenceInvite") OCTSTR, FIXD, 16, 0, SKIP, 0,
NULL},
{FNAME("genericDataReason") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("neededFeatureNotSupported") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("tunnelledSignallingRejected") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _ReleaseComplete_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("reason") CHOICE, 4, 12, 22, SKIP | EXT | OPT, 0,
_ReleaseCompleteReason},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("busyAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("presentationIndicator") CHOICE, 2, 3, 3, SKIP | EXT | OPT, 0,
NULL},
{FNAME("screeningIndicator") ENUM, 2, 0, 0, SKIP | EXT | OPT, 0,
NULL},
{FNAME("capacity") SEQ, 2, 2, 2, SKIP | EXT | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
};
static const struct field_t _Facility_UUIE_alternativeAliasAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _FacilityReason[] = { /* CHOICE */
{FNAME("routeCallToGatekeeper") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("callForwarded") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("routeCallToMC") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("undefinedReason") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("conferenceListChoice") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("startH245") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("noH245") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("newTokens") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("featureSetUpdate") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("forwardedElements") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("transportedInformation") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _Facility_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
static const struct field_t _Facility_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("alternativeAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Facility_UUIE, alternativeAddress), _TransportAddress},
{FNAME("alternativeAliasAddress") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Facility_UUIE_alternativeAliasAddress},
{FNAME("conferenceID") OCTSTR, FIXD, 16, 0, SKIP | OPT, 0, NULL},
{FNAME("reason") CHOICE, 2, 4, 11, DECODE | EXT,
offsetof(Facility_UUIE, reason), _FacilityReason},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
{FNAME("destExtraCallInfo") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("remoteExtensionAddress") CHOICE, 1, 2, 7, SKIP | EXT | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("conferences") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Facility_UUIE, h245Address), _TransportAddress},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(Facility_UUIE, fastStart), _Facility_UUIE_fastStart},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, SKIP | EXT | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT | OPT, 0, NULL},
{FNAME("h245SecurityMode") CHOICE, 2, 4, 4, SKIP | EXT | OPT, 0,
NULL},
};
static const struct field_t _CallIdentifier[] = { /* SEQUENCE */
{FNAME("guid") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
};
static const struct field_t _SecurityServiceMode[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter},
{FNAME("none") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("default") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _SecurityCapabilities[] = { /* SEQUENCE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("encryption") CHOICE, 2, 3, 3, SKIP | EXT, 0,
_SecurityServiceMode},
{FNAME("authenticaton") CHOICE, 2, 3, 3, SKIP | EXT, 0,
_SecurityServiceMode},
{FNAME("integrity") CHOICE, 2, 3, 3, SKIP | EXT, 0,
_SecurityServiceMode},
};
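/* sic: "authenticaton" is apparently misspelled in the H.235/H.225.0
 * ASN.1 itself and is preserved here as-is. */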
static const struct field_t _H245Security[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter},
{FNAME("noSecurity") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("tls") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities},
{FNAME("ipsec") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities},
};
static const struct field_t _DHset[] = { /* SEQUENCE */
{FNAME("halfkey") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("modSize") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
{FNAME("generator") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _TypedCertificate[] = { /* SEQUENCE */
{FNAME("type") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("certificate") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _H235_NonStandardParameter[] = { /* SEQUENCE */
{FNAME("nonStandardIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _ClearToken[] = { /* SEQUENCE */
{FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("timeStamp") INT, CONS, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("password") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("dhkey") SEQ, 0, 3, 3, SKIP | EXT | OPT, 0, _DHset},
{FNAME("challenge") OCTSTR, 7, 8, 0, SKIP | OPT, 0, NULL},
{FNAME("random") INT, UNCO, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("certificate") SEQ, 0, 2, 2, SKIP | EXT | OPT, 0,
_TypedCertificate},
{FNAME("generalID") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP | OPT, 0,
_H235_NonStandardParameter},
{FNAME("eckasdhkey") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0, NULL},
{FNAME("sendersID") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _Progress_UUIE_tokens[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken},
};
static const struct field_t _Params[] = { /* SEQUENCE */
{FNAME("ranInt") INT, UNCO, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("iv8") OCTSTR, FIXD, 8, 0, SKIP | OPT, 0, NULL},
{FNAME("iv16") OCTSTR, FIXD, 16, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoEPPwdHash_token[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoEPPwdHash[] = { /* SEQUENCE */
{FNAME("alias") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
{FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL},
{FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoEPPwdHash_token},
};
static const struct field_t _CryptoH323Token_cryptoGKPwdHash_token[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoGKPwdHash[] = { /* SEQUENCE */
{FNAME("gatekeeperId") BMPSTR, 7, 1, 0, SKIP, 0, NULL},
{FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL},
{FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoGKPwdHash_token},
};
static const struct field_t _CryptoH323Token_cryptoEPPwdEncr[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoGKPwdEncr[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoEPCert[] = { /* SEQUENCE */
{FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoGKCert[] = { /* SEQUENCE */
{FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoH323Token_cryptoFastStart[] = { /* SEQUENCE */
{FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoToken_cryptoEncryptedToken_token[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoToken_cryptoEncryptedToken[] = { /* SEQUENCE */
{FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
_CryptoToken_cryptoEncryptedToken_token},
};
static const struct field_t _CryptoToken_cryptoSignedToken_token[] = { /* SEQUENCE */
{FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoToken_cryptoSignedToken[] = { /* SEQUENCE */
{FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("token") SEQ, 0, 4, 4, SKIP, 0,
_CryptoToken_cryptoSignedToken_token},
};
static const struct field_t _CryptoToken_cryptoHashedToken_token[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoToken_cryptoHashedToken[] = { /* SEQUENCE */
{FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("hashedVals") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken},
{FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
_CryptoToken_cryptoHashedToken_token},
};
static const struct field_t _CryptoToken_cryptoPwdEncr[] = { /* SEQUENCE */
{FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
{FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _CryptoToken[] = { /* CHOICE */
{FNAME("cryptoEncryptedToken") SEQ, 0, 2, 2, SKIP, 0,
_CryptoToken_cryptoEncryptedToken},
{FNAME("cryptoSignedToken") SEQ, 0, 2, 2, SKIP, 0,
_CryptoToken_cryptoSignedToken},
{FNAME("cryptoHashedToken") SEQ, 0, 3, 3, SKIP, 0,
_CryptoToken_cryptoHashedToken},
{FNAME("cryptoPwdEncr") SEQ, 0, 3, 3, SKIP, 0,
_CryptoToken_cryptoPwdEncr},
};
static const struct field_t _CryptoH323Token[] = { /* CHOICE */
{FNAME("cryptoEPPwdHash") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoEPPwdHash},
{FNAME("cryptoGKPwdHash") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoGKPwdHash},
{FNAME("cryptoEPPwdEncr") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoEPPwdEncr},
{FNAME("cryptoGKPwdEncr") SEQ, 0, 3, 3, SKIP, 0,
_CryptoH323Token_cryptoGKPwdEncr},
{FNAME("cryptoEPCert") SEQ, 0, 4, 4, SKIP, 0,
_CryptoH323Token_cryptoEPCert},
{FNAME("cryptoGKCert") SEQ, 0, 4, 4, SKIP, 0,
_CryptoH323Token_cryptoGKCert},
{FNAME("cryptoFastStart") SEQ, 0, 4, 4, SKIP, 0,
_CryptoH323Token_cryptoFastStart},
{FNAME("nestedcryptoToken") CHOICE, 2, 4, 4, SKIP | EXT, 0,
_CryptoToken},
};
static const struct field_t _Progress_UUIE_cryptoTokens[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 8, 8, SKIP | EXT, 0, _CryptoH323Token},
};
static const struct field_t _Progress_UUIE_fastStart[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
sizeof(OpenLogicalChannel), _OpenLogicalChannel},
};
static const struct field_t _Progress_UUIE[] = { /* SEQUENCE */
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
_EndpointType},
{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(Progress_UUIE, h245Address), _TransportAddress},
{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0,
_CallIdentifier},
{FNAME("h245SecurityMode") CHOICE, 2, 4, 4, SKIP | EXT | OPT, 0,
_H245Security},
{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Progress_UUIE_tokens},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_Progress_UUIE_cryptoTokens},
{FNAME("fastStart") SEQOF, SEMI, 0, 30, DECODE | OPT,
offsetof(Progress_UUIE, fastStart), _Progress_UUIE_fastStart},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _H323_UU_PDU_h323_message_body[] = { /* CHOICE */
{FNAME("setup") SEQ, 7, 13, 39, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, setup), _Setup_UUIE},
{FNAME("callProceeding") SEQ, 1, 3, 12, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, callProceeding),
_CallProceeding_UUIE},
{FNAME("connect") SEQ, 1, 4, 19, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, connect), _Connect_UUIE},
{FNAME("alerting") SEQ, 1, 3, 17, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, alerting), _Alerting_UUIE},
{FNAME("information") SEQ, 0, 1, 7, SKIP | EXT, 0, _Information_UUIE},
{FNAME("releaseComplete") SEQ, 1, 2, 11, SKIP | EXT, 0,
_ReleaseComplete_UUIE},
{FNAME("facility") SEQ, 3, 5, 21, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, facility), _Facility_UUIE},
{FNAME("progress") SEQ, 5, 8, 11, DECODE | EXT,
offsetof(H323_UU_PDU_h323_message_body, progress), _Progress_UUIE},
{FNAME("empty") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("status") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL},
{FNAME("statusInquiry") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL},
{FNAME("setupAcknowledge") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL},
{FNAME("notify") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL},
};
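/*
 * Attribute semantics, as implemented by the decoder in
 * nf_conntrack_h323_asn1.c: DECODE parses a field and records its
 * value (or buffer position) at the entry's offsetof() in the output
 * structure; SKIP parses and discards it; STOP abandons the rest of
 * the message once no later field matters, and the resulting
 * H323_ERROR_STOP is treated by callers as a successful partial
 * decode.
 */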
static const struct field_t _RequestMessage[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("masterSlaveDetermination") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("terminalCapabilitySet") SEQ, 3, 5, 5, STOP | EXT, 0, NULL},
{FNAME("openLogicalChannel") SEQ, 1, 3, 5, DECODE | EXT,
offsetof(RequestMessage, openLogicalChannel), _OpenLogicalChannel},
{FNAME("closeLogicalChannel") SEQ, 0, 2, 3, STOP | EXT, 0, NULL},
{FNAME("requestChannelClose") SEQ, 0, 1, 3, STOP | EXT, 0, NULL},
{FNAME("multiplexEntrySend") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("requestMultiplexEntry") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("requestMode") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("roundTripDelayRequest") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("maintenanceLoopRequest") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("communicationModeRequest") SEQ, 0, 0, 0, STOP | EXT, 0, NULL},
{FNAME("conferenceRequest") CHOICE, 3, 8, 16, STOP | EXT, 0, NULL},
{FNAME("multilinkRequest") CHOICE, 3, 5, 5, STOP | EXT, 0, NULL},
{FNAME("logicalChannelRateRequest") SEQ, 0, 3, 3, STOP | EXT, 0,
NULL},
};
static const struct field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
{FNAME("h222LogicalChannelParameters") SEQ, 3, 5, 5, SKIP | EXT, 0,
_H222LogicalChannelParameters},
{FNAME("h2250LogicalChannelParameters") SEQ, 10, 11, 14, DECODE | EXT,
offsetof
(OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters,
h2250LogicalChannelParameters), _H2250LogicalChannelParameters},
};
static const struct field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters[] = { /* SEQUENCE */
{FNAME("reverseLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("multiplexParameters") CHOICE, 0, 1, 2, DECODE | EXT | OPT,
offsetof(OpenLogicalChannelAck_reverseLogicalChannelParameters,
multiplexParameters),
_OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters},
{FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _H2250LogicalChannelAckParameters_nonStandard[] = { /* SEQUENCE OF */
{FNAME("item") SEQ, 0, 2, 2, SKIP, 0, _H245_NonStandardParameter},
};
static const struct field_t _H2250LogicalChannelAckParameters[] = { /* SEQUENCE */
{FNAME("nonStandard") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_H2250LogicalChannelAckParameters_nonStandard},
{FNAME("sessionID") INT, 8, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("mediaChannel") CHOICE, 1, 2, 2, DECODE | EXT | OPT,
offsetof(H2250LogicalChannelAckParameters, mediaChannel),
_H245_TransportAddress},
{FNAME("mediaControlChannel") CHOICE, 1, 2, 2, DECODE | EXT | OPT,
offsetof(H2250LogicalChannelAckParameters, mediaControlChannel),
_H245_TransportAddress},
{FNAME("dynamicRTPPayloadType") INT, 5, 96, 0, SKIP | OPT, 0, NULL},
{FNAME("flowControlToZero") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
};
static const struct field_t _OpenLogicalChannelAck_forwardMultiplexAckParameters[] = { /* CHOICE */
{FNAME("h2250LogicalChannelAckParameters") SEQ, 5, 5, 7, DECODE | EXT,
offsetof(OpenLogicalChannelAck_forwardMultiplexAckParameters,
h2250LogicalChannelAckParameters),
_H2250LogicalChannelAckParameters},
};
static const struct field_t _OpenLogicalChannelAck[] = { /* SEQUENCE */
{FNAME("forwardLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("reverseLogicalChannelParameters") SEQ, 2, 3, 4,
DECODE | EXT | OPT, offsetof(OpenLogicalChannelAck,
reverseLogicalChannelParameters),
_OpenLogicalChannelAck_reverseLogicalChannelParameters},
{FNAME("separateStack") SEQ, 2, 4, 5, DECODE | EXT | OPT,
offsetof(OpenLogicalChannelAck, separateStack),
_NetworkAccessParameters},
{FNAME("forwardMultiplexAckParameters") CHOICE, 0, 1, 1,
DECODE | EXT | OPT, offsetof(OpenLogicalChannelAck,
forwardMultiplexAckParameters),
_OpenLogicalChannelAck_forwardMultiplexAckParameters},
{FNAME("encryptionSync") SEQ, 2, 4, 4, STOP | EXT | OPT, 0, NULL},
};
static const struct field_t _ResponseMessage[] = { /* CHOICE */
{FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("masterSlaveDeterminationAck") SEQ, 0, 1, 1, STOP | EXT, 0,
NULL},
{FNAME("masterSlaveDeterminationReject") SEQ, 0, 1, 1, STOP | EXT, 0,
NULL},
{FNAME("terminalCapabilitySetAck") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("terminalCapabilitySetReject") SEQ, 0, 2, 2, STOP | EXT, 0,
NULL},
{FNAME("openLogicalChannelAck") SEQ, 1, 2, 5, DECODE | EXT,
offsetof(ResponseMessage, openLogicalChannelAck),
_OpenLogicalChannelAck},
{FNAME("openLogicalChannelReject") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("closeLogicalChannelAck") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("requestChannelCloseAck") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("requestChannelCloseReject") SEQ, 0, 2, 2, STOP | EXT, 0,
NULL},
{FNAME("multiplexEntrySendAck") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("multiplexEntrySendReject") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("requestMultiplexEntryAck") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("requestMultiplexEntryReject") SEQ, 0, 2, 2, STOP | EXT, 0,
NULL},
{FNAME("requestModeAck") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("requestModeReject") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("roundTripDelayResponse") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("maintenanceLoopAck") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("maintenanceLoopReject") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
{FNAME("communicationModeResponse") CHOICE, 0, 1, 1, STOP | EXT, 0,
NULL},
{FNAME("conferenceResponse") CHOICE, 3, 8, 16, STOP | EXT, 0, NULL},
{FNAME("multilinkResponse") CHOICE, 3, 5, 5, STOP | EXT, 0, NULL},
{FNAME("logicalChannelRateAcknowledge") SEQ, 0, 3, 3, STOP | EXT, 0,
NULL},
{FNAME("logicalChannelRateReject") SEQ, 1, 4, 4, STOP | EXT, 0, NULL},
};
static const struct field_t _MultimediaSystemControlMessage[] = { /* CHOICE */
{FNAME("request") CHOICE, 4, 11, 15, DECODE | EXT,
offsetof(MultimediaSystemControlMessage, request), _RequestMessage},
{FNAME("response") CHOICE, 5, 19, 24, DECODE | EXT,
offsetof(MultimediaSystemControlMessage, response),
_ResponseMessage},
{FNAME("command") CHOICE, 3, 7, 12, STOP | EXT, 0, NULL},
{FNAME("indication") CHOICE, 4, 14, 23, STOP | EXT, 0, NULL},
};
static const struct field_t _H323_UU_PDU_h245Control[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 2, 4, 4, DECODE | OPEN | EXT,
sizeof(MultimediaSystemControlMessage),
_MultimediaSystemControlMessage},
};
static const struct field_t _H323_UU_PDU[] = { /* SEQUENCE */
{FNAME("h323-message-body") CHOICE, 3, 7, 13, DECODE | EXT,
offsetof(H323_UU_PDU, h323_message_body),
_H323_UU_PDU_h323_message_body},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("h4501SupplementaryService") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
NULL},
{FNAME("h245Tunneling") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("h245Control") SEQOF, SEMI, 0, 4, DECODE | OPT,
offsetof(H323_UU_PDU, h245Control), _H323_UU_PDU_h245Control},
{FNAME("nonStandardControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("callLinkage") SEQ, 2, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("tunnelledSignallingMessage") SEQ, 2, 4, 4, STOP | EXT | OPT,
0, NULL},
{FNAME("provisionalRespToH245Tunneling") NUL, FIXD, 0, 0, STOP | OPT,
0, NULL},
{FNAME("stimulusControl") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _H323_UserInformation[] = { /* SEQUENCE */
{FNAME("h323-uu-pdu") SEQ, 1, 2, 11, DECODE | EXT,
offsetof(H323_UserInformation, h323_uu_pdu), _H323_UU_PDU},
{FNAME("user-data") SEQ, 0, 2, 2, STOP | EXT | OPT, 0, NULL},
};
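/*
 * Illustrative sketch only, not part of the generated tables: how a
 * caller drives them.  DecodeMultimediaSystemControlMessage() is the
 * entry point declared in nf_conntrack_h323_asn1.h; the function and
 * buffer names below are invented for the example.
 */
#if 0	/* example only, never compiled */
static int example_decode_h245(unsigned char *data, size_t datalen)
{
	static MultimediaSystemControlMessage mscm;
	int ret;

	/* Walks _MultimediaSystemControlMessage and its sub-tables;
	 * returns H323_ERROR_NONE or H323_ERROR_STOP on success and a
	 * negative H323_ERROR_* value on malformed input.
	 */
	ret = DecodeMultimediaSystemControlMessage(data, datalen, &mscm);
	if (ret < 0)
		return ret;

	if (mscm.choice == eMultimediaSystemControlMessage_request &&
	    mscm.request.choice == eRequestMessage_openLogicalChannel) {
		/* mscm.request.openLogicalChannel now holds every field
		 * flagged DECODE in _OpenLogicalChannel above. */
	}
	return 0;
}
#endif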
static const struct field_t _GatekeeperRequest[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("rasAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(GatekeeperRequest, rasAddress), _TransportAddress},
{FNAME("endpointType") SEQ, 6, 8, 10, STOP | EXT, 0, NULL},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("callServices") SEQ, 0, 8, 8, STOP | EXT | OPT, 0, NULL},
{FNAME("endpointAlias") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("alternateEndpoints") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("authenticationCapability") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("algorithmOIDs") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrity") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("supportsAltGK") NUL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _GatekeeperConfirm[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("rasAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(GatekeeperConfirm, rasAddress), _TransportAddress},
{FNAME("alternateGatekeeper") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("authenticationMode") CHOICE, 3, 7, 8, STOP | EXT | OPT, 0,
NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("algorithmOID") OID, BYTE, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrity") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _RegistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
sizeof(TransportAddress), _TransportAddress},
};
static const struct field_t _RegistrationRequest_rasAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
sizeof(TransportAddress), _TransportAddress},
};
static const struct field_t _RegistrationRequest_terminalAlias[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _RegistrationRequest[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("discoveryComplete") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE,
offsetof(RegistrationRequest, callSignalAddress),
_RegistrationRequest_callSignalAddress},
{FNAME("rasAddress") SEQOF, SEMI, 0, 10, DECODE,
offsetof(RegistrationRequest, rasAddress),
_RegistrationRequest_rasAddress},
{FNAME("terminalType") SEQ, 6, 8, 10, SKIP | EXT, 0, _EndpointType},
{FNAME("terminalAlias") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_RegistrationRequest_terminalAlias},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("endpointVendor") SEQ, 2, 3, 3, SKIP | EXT, 0,
_VendorIdentifier},
{FNAME("alternateEndpoints") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("timeToLive") INT, CONS, 1, 0, DECODE | OPT,
offsetof(RegistrationRequest, timeToLive), NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("keepAlive") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("willSupplyUUIEs") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("alternateTransportAddresses") SEQ, 1, 1, 1, STOP | EXT | OPT,
0, NULL},
{FNAME("additiveRegistration") NUL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("terminalAliasPattern") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("supportsAltGK") NUL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("usageReportingCapability") SEQ, 3, 4, 4, STOP | EXT | OPT, 0,
NULL},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("supportedH248Packages") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("callCreditCapability") SEQ, 2, 2, 2, STOP | EXT | OPT, 0,
NULL},
{FNAME("capacityReportingCapability") SEQ, 0, 1, 1, STOP | EXT | OPT,
0, NULL},
{FNAME("capacity") SEQ, 2, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
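/*
 * The DECODE'd fields above (callSignalAddress, rasAddress and
 * timeToLive) are the ones the RAS helper acts on: the addresses may
 * be rewritten for NAT, and timeToLive lets the helper track
 * registration keep-alives when refreshing the conntrack entry.
 */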
static const struct field_t _RegistrationConfirm_callSignalAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
sizeof(TransportAddress), _TransportAddress},
};
static const struct field_t _RegistrationConfirm_terminalAlias[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _RegistrationConfirm[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE,
offsetof(RegistrationConfirm, callSignalAddress),
_RegistrationConfirm_callSignalAddress},
{FNAME("terminalAlias") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_RegistrationConfirm_terminalAlias},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP, 0, NULL},
{FNAME("alternateGatekeeper") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
{FNAME("timeToLive") INT, CONS, 1, 0, DECODE | OPT,
offsetof(RegistrationConfirm, timeToLive), NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("willRespondToIRR") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("preGrantedARQ") SEQ, 0, 4, 8, STOP | EXT | OPT, 0, NULL},
{FNAME("maintainConnection") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("supportsAdditiveRegistration") NUL, FIXD, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("terminalAliasPattern") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("usageSpec") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("featureServerAlias") CHOICE, 1, 2, 7, STOP | EXT | OPT, 0,
NULL},
{FNAME("capacityReportingSpec") SEQ, 0, 1, 1, STOP | EXT | OPT, 0,
NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _UnregistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
sizeof(TransportAddress), _TransportAddress},
};
static const struct field_t _UnregistrationRequest[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE,
offsetof(UnregistrationRequest, callSignalAddress),
_UnregistrationRequest_callSignalAddress},
{FNAME("endpointAlias") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("alternateEndpoints") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("reason") CHOICE, 2, 4, 5, STOP | EXT | OPT, 0, NULL},
{FNAME("endpointAliasPattern") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("alternateGatekeeper") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _CallModel[] = { /* CHOICE */
{FNAME("direct") NUL, FIXD, 0, 0, SKIP, 0, NULL},
{FNAME("gatekeeperRouted") NUL, FIXD, 0, 0, SKIP, 0, NULL},
};
static const struct field_t _AdmissionRequest_destinationInfo[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _AdmissionRequest_destExtraCallInfo[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _AdmissionRequest_srcInfo[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _AdmissionRequest[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("callType") CHOICE, 2, 4, 4, SKIP | EXT, 0, _CallType},
{FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0, _CallModel},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP, 0, NULL},
{FNAME("destinationInfo") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_AdmissionRequest_destinationInfo},
{FNAME("destCallSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(AdmissionRequest, destCallSignalAddress),
_TransportAddress},
{FNAME("destExtraCallInfo") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
_AdmissionRequest_destExtraCallInfo},
{FNAME("srcInfo") SEQOF, SEMI, 0, 0, SKIP, 0,
_AdmissionRequest_srcInfo},
{FNAME("srcCallSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
offsetof(AdmissionRequest, srcCallSignalAddress), _TransportAddress},
{FNAME("bandWidth") INT, CONS, 0, 0, STOP, 0, NULL},
{FNAME("callReferenceValue") INT, WORD, 0, 0, STOP, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("callServices") SEQ, 0, 8, 8, STOP | EXT | OPT, 0, NULL},
{FNAME("conferenceID") OCTSTR, FIXD, 16, 0, STOP, 0, NULL},
{FNAME("activeMC") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("answerCall") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("canMapAlias") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("callIdentifier") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
{FNAME("srcAlternatives") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destAlternatives") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("transportQOS") CHOICE, 2, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("willSupplyUUIEs") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("callLinkage") SEQ, 2, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("gatewayDataRate") SEQ, 2, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("capacity") SEQ, 2, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("desiredProtocols") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("desiredTunnelledProtocol") SEQ, 1, 2, 2, STOP | EXT | OPT, 0,
NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _AdmissionConfirm[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("bandWidth") INT, CONS, 0, 0, SKIP, 0, NULL},
{FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT, 0, _CallModel},
{FNAME("destCallSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(AdmissionConfirm, destCallSignalAddress),
_TransportAddress},
{FNAME("irrFrequency") INT, WORD, 1, 0, STOP | OPT, 0, NULL},
{FNAME("nonStandardData") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("destinationInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destExtraCallInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destinationType") SEQ, 6, 8, 10, STOP | EXT | OPT, 0, NULL},
{FNAME("remoteExtensionAddress") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("alternateEndpoints") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("transportQOS") CHOICE, 2, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("willRespondToIRR") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("uuiesRequested") SEQ, 0, 9, 13, STOP | EXT, 0, NULL},
{FNAME("language") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("alternateTransportAddresses") SEQ, 1, 1, 1, STOP | EXT | OPT,
0, NULL},
{FNAME("useSpecifiedTransport") CHOICE, 1, 2, 2, STOP | EXT | OPT, 0,
NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("usageSpec") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("supportedProtocols") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _LocationRequest_destinationInfo[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
};
static const struct field_t _LocationRequest[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
{FNAME("destinationInfo") SEQOF, SEMI, 0, 0, SKIP, 0,
_LocationRequest_destinationInfo},
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("replyAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(LocationRequest, replyAddress), _TransportAddress},
{FNAME("sourceInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("canMapAlias") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("gatekeeperIdentifier") BMPSTR, 7, 1, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("desiredProtocols") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("desiredTunnelledProtocol") SEQ, 1, 2, 2, STOP | EXT | OPT, 0,
NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("hopCount") INT, 8, 1, 0, STOP | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
};
static const struct field_t _LocationConfirm[] = { /* SEQUENCE */
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("callSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(LocationConfirm, callSignalAddress), _TransportAddress},
{FNAME("rasAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(LocationConfirm, rasAddress), _TransportAddress},
{FNAME("nonStandardData") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("destinationInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destExtraCallInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("destinationType") SEQ, 6, 8, 10, STOP | EXT | OPT, 0, NULL},
{FNAME("remoteExtensionAddress") SEQOF, SEMI, 0, 0, STOP | OPT, 0,
NULL},
{FNAME("alternateEndpoints") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("alternateTransportAddresses") SEQ, 1, 1, 1, STOP | EXT | OPT,
0, NULL},
{FNAME("supportedProtocols") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("multipleCalls") BOOL, FIXD, 0, 0, STOP | OPT, 0, NULL},
{FNAME("featureSet") SEQ, 3, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
{FNAME("serviceControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _InfoRequestResponse_callSignalAddress[] = { /* SEQUENCE OF */
{FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
sizeof(TransportAddress), _TransportAddress},
};
static const struct field_t _InfoRequestResponse[] = { /* SEQUENCE */
{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
_NonStandardParameter},
{FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
{FNAME("endpointType") SEQ, 6, 8, 10, SKIP | EXT, 0, _EndpointType},
{FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP, 0, NULL},
{FNAME("rasAddress") CHOICE, 3, 7, 7, DECODE | EXT,
offsetof(InfoRequestResponse, rasAddress), _TransportAddress},
{FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE,
offsetof(InfoRequestResponse, callSignalAddress),
_InfoRequestResponse_callSignalAddress},
{FNAME("endpointAlias") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("perCallInfo") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("tokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("cryptoTokens") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
{FNAME("integrityCheckValue") SEQ, 0, 2, 2, STOP | OPT, 0, NULL},
{FNAME("needResponse") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("capacity") SEQ, 2, 2, 2, STOP | EXT | OPT, 0, NULL},
{FNAME("irrStatus") CHOICE, 2, 4, 4, STOP | EXT | OPT, 0, NULL},
{FNAME("unsolicited") BOOL, FIXD, 0, 0, STOP, 0, NULL},
{FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
};
static const struct field_t _RasMessage[] = { /* CHOICE */
{FNAME("gatekeeperRequest") SEQ, 4, 8, 18, DECODE | EXT,
offsetof(RasMessage, gatekeeperRequest), _GatekeeperRequest},
{FNAME("gatekeeperConfirm") SEQ, 2, 5, 14, DECODE | EXT,
offsetof(RasMessage, gatekeeperConfirm), _GatekeeperConfirm},
{FNAME("gatekeeperReject") SEQ, 2, 5, 11, STOP | EXT, 0, NULL},
{FNAME("registrationRequest") SEQ, 3, 10, 31, DECODE | EXT,
offsetof(RasMessage, registrationRequest), _RegistrationRequest},
{FNAME("registrationConfirm") SEQ, 3, 7, 24, DECODE | EXT,
offsetof(RasMessage, registrationConfirm), _RegistrationConfirm},
{FNAME("registrationReject") SEQ, 2, 5, 11, STOP | EXT, 0, NULL},
{FNAME("unregistrationRequest") SEQ, 3, 5, 15, DECODE | EXT,
offsetof(RasMessage, unregistrationRequest), _UnregistrationRequest},
{FNAME("unregistrationConfirm") SEQ, 1, 2, 6, STOP | EXT, 0, NULL},
{FNAME("unregistrationReject") SEQ, 1, 3, 8, STOP | EXT, 0, NULL},
{FNAME("admissionRequest") SEQ, 7, 16, 34, DECODE | EXT,
offsetof(RasMessage, admissionRequest), _AdmissionRequest},
{FNAME("admissionConfirm") SEQ, 2, 6, 27, DECODE | EXT,
offsetof(RasMessage, admissionConfirm), _AdmissionConfirm},
{FNAME("admissionReject") SEQ, 1, 3, 11, STOP | EXT, 0, NULL},
{FNAME("bandwidthRequest") SEQ, 2, 7, 18, STOP | EXT, 0, NULL},
{FNAME("bandwidthConfirm") SEQ, 1, 3, 8, STOP | EXT, 0, NULL},
{FNAME("bandwidthReject") SEQ, 1, 4, 9, STOP | EXT, 0, NULL},
{FNAME("disengageRequest") SEQ, 1, 6, 19, STOP | EXT, 0, NULL},
{FNAME("disengageConfirm") SEQ, 1, 2, 9, STOP | EXT, 0, NULL},
{FNAME("disengageReject") SEQ, 1, 3, 8, STOP | EXT, 0, NULL},
{FNAME("locationRequest") SEQ, 2, 5, 17, DECODE | EXT,
offsetof(RasMessage, locationRequest), _LocationRequest},
{FNAME("locationConfirm") SEQ, 1, 4, 19, DECODE | EXT,
offsetof(RasMessage, locationConfirm), _LocationConfirm},
{FNAME("locationReject") SEQ, 1, 3, 10, STOP | EXT, 0, NULL},
{FNAME("infoRequest") SEQ, 2, 4, 15, STOP | EXT, 0, NULL},
{FNAME("infoRequestResponse") SEQ, 3, 8, 16, DECODE | EXT,
offsetof(RasMessage, infoRequestResponse), _InfoRequestResponse},
{FNAME("nonStandardMessage") SEQ, 0, 2, 7, STOP | EXT, 0, NULL},
{FNAME("unknownMessageResponse") SEQ, 0, 1, 5, STOP | EXT, 0, NULL},
{FNAME("requestInProgress") SEQ, 4, 6, 6, STOP | EXT, 0, NULL},
{FNAME("resourcesAvailableIndicate") SEQ, 4, 9, 11, STOP | EXT, 0,
NULL},
{FNAME("resourcesAvailableConfirm") SEQ, 4, 6, 7, STOP | EXT, 0,
NULL},
{FNAME("infoRequestAck") SEQ, 4, 5, 5, STOP | EXT, 0, NULL},
{FNAME("infoRequestNak") SEQ, 5, 7, 7, STOP | EXT, 0, NULL},
{FNAME("serviceControlIndication") SEQ, 8, 10, 10, STOP | EXT, 0,
NULL},
{FNAME("serviceControlResponse") SEQ, 7, 8, 8, STOP | EXT, 0, NULL},
};
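/* Reader's note (added): as I understand the H.323 ASN.1 decoder that
 * consumes these tables, DECODE entries are extracted into the C struct
 * at the given offset, SKIP entries are parsed and discarded, and STOP
 * marks the first field the conntrack helper does not need, ending the
 * decode of the remaining fields of that message.
 */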
| linux-master | net/netfilter/nf_conntrack_h323_types.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_u32 - kernel module to match u32 packet content
*
* Original author: Don Cohen <[email protected]>
* (C) CC Computer Consultants GmbH, 2007
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_u32.h>
static bool u32_match_it(const struct xt_u32 *data,
const struct sk_buff *skb)
{
const struct xt_u32_test *ct;
unsigned int testind;
unsigned int nnums;
unsigned int nvals;
unsigned int i;
__be32 n;
u_int32_t pos;
u_int32_t val;
u_int32_t at;
/*
* Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17"
* (=IPv4 and (TCP or UDP)). Outer loop runs over the "&&" operands.
*/
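/* Worked reading of the example above (added note): word 0 of an
 * IPv4 header holds version/IHL/TOS/total length, so "0 >> 28"
 * extracts the 4-bit version field, which must equal 4; word 8
 * holds TTL/protocol/checksum, so "8 & 0xFF0000 >> 16" isolates
 * the protocol byte, which must be 6 (TCP) or 17 (UDP).
 */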
for (testind = 0; testind < data->ntests; ++testind) {
ct = &data->tests[testind];
at = 0;
pos = ct->location[0].number;
if (skb->len < 4 || pos > skb->len - 4)
return false;
if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
BUG();
val = ntohl(n);
nnums = ct->nnums;
/* Inner loop runs over "&", "<<", ">>" and "@" operands */
for (i = 1; i < nnums; ++i) {
u_int32_t number = ct->location[i].number;
switch (ct->location[i].nextop) {
case XT_U32_AND:
val &= number;
break;
case XT_U32_LEFTSH:
val <<= number;
break;
case XT_U32_RIGHTSH:
val >>= number;
break;
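/* Added note: "@" makes the access relative: advance the base
 * offset "at" by the current value, then reload "val" from the
 * word at offset "at" + number. The "x + y < x" tests below
 * guard against u32 wraparound before comparing with skb->len.
 */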
case XT_U32_AT:
if (at + val < at)
return false;
at += val;
pos = number;
if (at + 4 < at || skb->len < at + 4 ||
pos > skb->len - at - 4)
return false;
if (skb_copy_bits(skb, at + pos, &n,
sizeof(n)) < 0)
BUG();
val = ntohl(n);
break;
}
}
/* Run over the "," and ":" operands */
nvals = ct->nvalues;
for (i = 0; i < nvals; ++i)
if (ct->value[i].min <= val && val <= ct->value[i].max)
break;
if (i >= ct->nvalues)
return false;
}
return true;
}
static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_u32 *data = par->matchinfo;
bool ret;
ret = u32_match_it(data, skb);
return ret ^ data->invert;
}
static int u32_mt_checkentry(const struct xt_mtchk_param *par)
{
const struct xt_u32 *data = par->matchinfo;
const struct xt_u32_test *ct;
unsigned int i;
if (data->ntests > ARRAY_SIZE(data->tests))
return -EINVAL;
for (i = 0; i < data->ntests; ++i) {
ct = &data->tests[i];
if (ct->nnums > ARRAY_SIZE(ct->location) ||
ct->nvalues > ARRAY_SIZE(ct->value))
return -EINVAL;
}
return 0;
}
static struct xt_match xt_u32_mt_reg __read_mostly = {
.name = "u32",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = u32_mt,
.checkentry = u32_mt_checkentry,
.matchsize = sizeof(struct xt_u32),
.me = THIS_MODULE,
};
static int __init u32_mt_init(void)
{
return xt_register_match(&xt_u32_mt_reg);
}
static void __exit u32_mt_exit(void)
{
xt_unregister_match(&xt_u32_mt_reg);
}
module_init(u32_mt_init);
module_exit(u32_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: arbitrary byte matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_u32");
MODULE_ALIAS("ip6t_u32");
| linux-master | net/netfilter/xt_u32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking support for PPTP (Point to Point Tunneling Protocol).
* PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
* GRE is defined in RFC 1701 and RFC 1702. Documentation of
* PPTP can be found in RFC 2637.
*
* (C) 2000-2005 by Harald Welte <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*
* (C) 2006-2012 Patrick McHardy <[email protected]>
*
* Limitations:
* - We blindly assume that control connections are always
* established in the PNS->PAC direction. This is a violation
* of RFC 2637.
* - We can only support a single call within each session.
* TODO:
* - testing of incoming PPTP calls
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#define NF_CT_PPTP_VERSION "3.1"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
MODULE_ALIAS("ip_conntrack_pptp");
MODULE_ALIAS_NFCT_HELPER("pptp");
static DEFINE_SPINLOCK(nf_pptp_lock);
const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
EXPORT_SYMBOL_GPL(nf_nat_pptp_hook);
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* PptpControlMessageType names */
static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
[0] = "UNKNOWN_MESSAGE",
[PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST",
[PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY",
[PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST",
[PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY",
[PPTP_ECHO_REQUEST] = "ECHO_REQUEST",
[PPTP_ECHO_REPLY] = "ECHO_REPLY",
[PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST",
[PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY",
[PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST",
[PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY",
[PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT",
[PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST",
[PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
[PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY",
[PPTP_SET_LINK_INFO] = "SET_LINK_INFO"
};
const char *pptp_msg_name(u_int16_t msg)
{
if (msg > PPTP_MSG_MAX)
return pptp_msg_name_array[0];
return pptp_msg_name_array[msg];
}
EXPORT_SYMBOL(pptp_msg_name);
#endif
#define SECS *HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define PPTP_GRE_TIMEOUT (10 MINS)
#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS)
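/* Illustrative expansion (added note): with the macros above,
 * "(10 MINS)" expands to "(10 * 60 * HZ)", i.e. ten minutes in
 * jiffies, and "(5 HOURS)" to "(5 * 60 * 60 * HZ)".
 */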
static void pptp_expectfn(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
const struct nf_nat_pptp_hook *hook;
struct net *net = nf_ct_net(ct);
pr_debug("increasing timeouts\n");
/* increase timeout of GRE data channel conntrack entry */
ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
/* Can you see how rusty this code is, compared with the pre-2.6.11
* one? That's what happened to my shiny newnat of 2002 ;( -HW */
hook = rcu_dereference(nf_nat_pptp_hook);
if (hook && ct->master->status & IPS_NAT_MASK)
hook->expectfn(ct, exp);
else {
struct nf_conntrack_tuple inv_t;
struct nf_conntrack_expect *exp_other;
/* obviously this tuple inversion only works until you do NAT */
nf_ct_invert_tuple(&inv_t, &exp->tuple);
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple(&inv_t);
exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
if (exp_other) {
/* delete other expectation. */
pr_debug("found\n");
nf_ct_unexpect_related(exp_other);
nf_ct_expect_put(exp_other);
} else {
pr_debug("not found\n");
}
}
}
static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
const struct nf_conntrack_tuple *t)
{
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_zone *zone;
struct nf_conntrack_expect *exp;
struct nf_conn *sibling;
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
zone = nf_ct_zone(ct);
h = nf_conntrack_find_get(net, zone, t);
if (h) {
sibling = nf_ct_tuplehash_to_ctrack(h);
pr_debug("setting timeout of conntrack %p to 0\n", sibling);
sibling->proto.gre.timeout = 0;
sibling->proto.gre.stream_timeout = 0;
nf_ct_kill(sibling);
nf_ct_put(sibling);
return 1;
} else {
exp = nf_ct_expect_find_get(net, zone, t);
if (exp) {
pr_debug("unexpect_related of expect %p\n", exp);
nf_ct_unexpect_related(exp);
nf_ct_expect_put(exp);
return 1;
}
}
return 0;
}
/* timeout GRE data connections */
static void pptp_destroy_siblings(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_conntrack_tuple t;
nf_ct_gre_keymap_destroy(ct);
/* try original (pns->pac) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
t.src.u.gre.key = ct_pptp_info->pns_call_id;
t.dst.u.gre.key = ct_pptp_info->pac_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout original pns->pac ct/exp\n");
/* try reply (pac->pns) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
t.src.u.gre.key = ct_pptp_info->pac_call_id;
t.dst.u.gre.key = ct_pptp_info->pns_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout reply pac->pns ct/exp\n");
}
/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
{
struct nf_conntrack_expect *exp_orig, *exp_reply;
const struct nf_nat_pptp_hook *hook;
enum ip_conntrack_dir dir;
int ret = 1;
exp_orig = nf_ct_expect_alloc(ct);
if (exp_orig == NULL)
goto out;
exp_reply = nf_ct_expect_alloc(ct);
if (exp_reply == NULL)
goto out_put_orig;
/* original direction, PNS->PAC */
dir = IP_CT_DIR_ORIGINAL;
nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[dir].tuple.dst.u3,
IPPROTO_GRE, &peer_callid, &callid);
exp_orig->expectfn = pptp_expectfn;
/* reply direction, PAC->PNS */
dir = IP_CT_DIR_REPLY;
nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[dir].tuple.dst.u3,
IPPROTO_GRE, &callid, &peer_callid);
exp_reply->expectfn = pptp_expectfn;
hook = rcu_dereference(nf_nat_pptp_hook);
if (hook && ct->status & IPS_NAT_MASK)
hook->exp_gre(exp_orig, exp_reply);
if (nf_ct_expect_related(exp_orig, 0) != 0)
goto out_put_both;
if (nf_ct_expect_related(exp_reply, 0) != 0)
goto out_unexpect_orig;
/* Add GRE keymap entries */
if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0)
goto out_unexpect_both;
if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) {
nf_ct_gre_keymap_destroy(ct);
goto out_unexpect_both;
}
ret = 0;
out_put_both:
nf_ct_expect_put(exp_reply);
out_put_orig:
nf_ct_expect_put(exp_orig);
out:
return ret;
out_unexpect_both:
nf_ct_unexpect_related(exp_reply);
out_unexpect_orig:
nf_ct_unexpect_related(exp_orig);
goto out_put_both;
}
static int
pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq,
unsigned int reqlen,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
struct nf_ct_pptp_master *info = nfct_help_data(ct);
const struct nf_nat_pptp_hook *hook;
u_int16_t msg;
__be16 cid = 0, pcid = 0;
msg = ntohs(ctlh->messageType);
pr_debug("inbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REPLY:
/* server confirms new control session */
if (info->sstate < PPTP_SESSION_REQUESTED)
goto invalid;
if (pptpReq->srep.resultCode == PPTP_START_OK)
info->sstate = PPTP_SESSION_CONFIRMED;
else
info->sstate = PPTP_SESSION_ERROR;
break;
case PPTP_STOP_SESSION_REPLY:
/* server confirms end of control session */
if (info->sstate > PPTP_SESSION_STOPREQ)
goto invalid;
if (pptpReq->strep.resultCode == PPTP_STOP_OK)
info->sstate = PPTP_SESSION_NONE;
else
info->sstate = PPTP_SESSION_ERROR;
break;
case PPTP_OUT_CALL_REPLY:
/* server accepted call, we now expect GRE frames */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
if (info->cstate != PPTP_CALL_OUT_REQ &&
info->cstate != PPTP_CALL_OUT_CONF)
goto invalid;
cid = pptpReq->ocack.callID;
pcid = pptpReq->ocack.peersCallID;
if (info->pns_call_id != pcid)
goto invalid;
pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
info->cstate = PPTP_CALL_OUT_CONF;
info->pac_call_id = cid;
exp_gre(ct, cid, pcid);
} else
info->cstate = PPTP_CALL_NONE;
break;
case PPTP_IN_CALL_REQUEST:
/* server tells us about incoming call request */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
cid = pptpReq->icreq.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_IN_REQ;
info->pac_call_id = cid;
break;
case PPTP_IN_CALL_CONNECT:
/* server tells us about incoming call established */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
if (info->cstate != PPTP_CALL_IN_REP &&
info->cstate != PPTP_CALL_IN_CONF)
goto invalid;
pcid = pptpReq->iccon.peersCallID;
cid = info->pac_call_id;
if (info->pns_call_id != pcid)
goto invalid;
pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
info->cstate = PPTP_CALL_IN_CONF;
/* we expect a GRE connection from PAC to PNS */
exp_gre(ct, cid, pcid);
break;
case PPTP_CALL_DISCONNECT_NOTIFY:
/* server confirms disconnect */
cid = pptpReq->disc.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_NONE;
/* untrack this call id, unexpect GRE packets */
pptp_destroy_siblings(ct);
break;
case PPTP_WAN_ERROR_NOTIFY:
case PPTP_SET_LINK_INFO:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* I don't have to explain these ;) */
break;
default:
goto invalid;
}
hook = rcu_dereference(nf_nat_pptp_hook);
if (hook && ct->status & IPS_NAT_MASK)
return hook->inbound(skb, ct, ctinfo, protoff, ctlh, pptpReq);
return NF_ACCEPT;
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
}
static int
pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq,
unsigned int reqlen,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
struct nf_ct_pptp_master *info = nfct_help_data(ct);
const struct nf_nat_pptp_hook *hook;
u_int16_t msg;
__be16 cid = 0, pcid = 0;
msg = ntohs(ctlh->messageType);
pr_debug("outbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REQUEST:
/* client requests for new control session */
if (info->sstate != PPTP_SESSION_NONE)
goto invalid;
info->sstate = PPTP_SESSION_REQUESTED;
break;
case PPTP_STOP_SESSION_REQUEST:
/* client requests end of control session */
info->sstate = PPTP_SESSION_STOPREQ;
break;
case PPTP_OUT_CALL_REQUEST:
/* client initiating connection to server */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
info->cstate = PPTP_CALL_OUT_REQ;
/* track PNS call id */
cid = pptpReq->ocreq.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->pns_call_id = cid;
break;
case PPTP_IN_CALL_REPLY:
/* client answers incoming call */
if (info->cstate != PPTP_CALL_IN_REQ &&
info->cstate != PPTP_CALL_IN_REP)
goto invalid;
cid = pptpReq->icack.callID;
pcid = pptpReq->icack.peersCallID;
if (info->pac_call_id != pcid)
goto invalid;
pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
/* part two of the three-way handshake */
info->cstate = PPTP_CALL_IN_REP;
info->pns_call_id = cid;
} else
info->cstate = PPTP_CALL_NONE;
break;
case PPTP_CALL_CLEAR_REQUEST:
/* client requests hangup of call */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
/* FUTURE: iterate over all calls and check if
* call ID is valid. We don't do this without newnat,
* because we only know about last call */
info->cstate = PPTP_CALL_CLEAR_REQ;
break;
case PPTP_SET_LINK_INFO:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* I don't have to explain these ;) */
break;
default:
goto invalid;
}
hook = rcu_dereference(nf_nat_pptp_hook);
if (hook && ct->status & IPS_NAT_MASK)
return hook->outbound(skb, ct, ctinfo, protoff, ctlh, pptpReq);
return NF_ACCEPT;
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
}
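/* Minimum payload size of each control message type, indexed by
 * PptpControlMessageType; conntrack_pptp_help() uses this to ignore
 * truncated control messages instead of parsing garbage.
 */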
static const unsigned int pptp_msg_size[] = {
[PPTP_START_SESSION_REQUEST] = sizeof(struct PptpStartSessionRequest),
[PPTP_START_SESSION_REPLY] = sizeof(struct PptpStartSessionReply),
[PPTP_STOP_SESSION_REQUEST] = sizeof(struct PptpStopSessionRequest),
[PPTP_STOP_SESSION_REPLY] = sizeof(struct PptpStopSessionReply),
[PPTP_OUT_CALL_REQUEST] = sizeof(struct PptpOutCallRequest),
[PPTP_OUT_CALL_REPLY] = sizeof(struct PptpOutCallReply),
[PPTP_IN_CALL_REQUEST] = sizeof(struct PptpInCallRequest),
[PPTP_IN_CALL_REPLY] = sizeof(struct PptpInCallReply),
[PPTP_IN_CALL_CONNECT] = sizeof(struct PptpInCallConnected),
[PPTP_CALL_CLEAR_REQUEST] = sizeof(struct PptpClearCallRequest),
[PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify),
[PPTP_WAN_ERROR_NOTIFY] = sizeof(struct PptpWanErrorNotify),
[PPTP_SET_LINK_INFO] = sizeof(struct PptpSetLinkInfo),
};
/* track caller id inside control connection, call expect_related */
static int
conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
int dir = CTINFO2DIR(ctinfo);
const struct nf_ct_pptp_master *info = nfct_help_data(ct);
const struct tcphdr *tcph;
struct tcphdr _tcph;
const struct pptp_pkt_hdr *pptph;
struct pptp_pkt_hdr _pptph;
struct PptpControlHeader _ctlh, *ctlh;
union pptp_ctrl_union _pptpReq, *pptpReq;
unsigned int tcplen = skb->len - protoff;
unsigned int datalen, reqlen, nexthdr_off;
int oldsstate, oldcstate;
int ret;
u_int16_t msg;
#if IS_ENABLED(CONFIG_NF_NAT)
if (!nf_ct_is_confirmed(ct) && (ct->status & IPS_NAT_MASK)) {
struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
if (!nat && !nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC))
return NF_DROP;
}
#endif
/* don't do any tracking before tcp handshake complete */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
nexthdr_off = protoff;
tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph);
if (!tcph)
return NF_ACCEPT;
nexthdr_off += tcph->doff * 4;
datalen = tcplen - tcph->doff * 4;
pptph = skb_header_pointer(skb, nexthdr_off, sizeof(_pptph), &_pptph);
if (!pptph) {
pr_debug("no full PPTP header, can't track\n");
return NF_ACCEPT;
}
nexthdr_off += sizeof(_pptph);
datalen -= sizeof(_pptph);
/* if it's not a control message we can't do anything with it */
if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
pr_debug("not a control packet\n");
return NF_ACCEPT;
}
ctlh = skb_header_pointer(skb, nexthdr_off, sizeof(_ctlh), &_ctlh);
if (!ctlh)
return NF_ACCEPT;
nexthdr_off += sizeof(_ctlh);
datalen -= sizeof(_ctlh);
reqlen = datalen;
msg = ntohs(ctlh->messageType);
if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg])
return NF_ACCEPT;
if (reqlen > sizeof(*pptpReq))
reqlen = sizeof(*pptpReq);
pptpReq = skb_header_pointer(skb, nexthdr_off, reqlen, &_pptpReq);
if (!pptpReq)
return NF_ACCEPT;
oldsstate = info->sstate;
oldcstate = info->cstate;
spin_lock_bh(&nf_pptp_lock);
/* FIXME: We just blindly assume that the control connection is always
 * established from PNS->PAC. However, the RFC makes no such guarantee */
if (dir == IP_CT_DIR_ORIGINAL)
/* client -> server (PNS -> PAC) */
ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
ctinfo);
else
/* server -> client (PAC -> PNS) */
ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
ctinfo);
pr_debug("sstate: %d->%d, cstate: %d->%d\n",
oldsstate, info->sstate, oldcstate, info->cstate);
spin_unlock_bh(&nf_pptp_lock);
return ret;
}
static const struct nf_conntrack_expect_policy pptp_exp_policy = {
.max_expected = 2,
.timeout = 5 * 60,
};
/* control protocol helper */
static struct nf_conntrack_helper pptp __read_mostly = {
.name = "pptp",
.me = THIS_MODULE,
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
.help = conntrack_pptp_help,
.destroy = pptp_destroy_siblings,
.expect_policy = &pptp_exp_policy,
};
static int __init nf_conntrack_pptp_init(void)
{
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_pptp_master));
return nf_conntrack_helper_register(&pptp);
}
static void __exit nf_conntrack_pptp_fini(void)
{
nf_conntrack_helper_unregister(&pptp);
}
module_init(nf_conntrack_pptp_init);
module_exit(nf_conntrack_pptp_fini);
| linux-master | net/netfilter/nf_conntrack_pptp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 2001-2002 Magnus Boden <[email protected]>
*/
#include <linux/module.h>
#include <linux/udp.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/netfilter/nf_conntrack_tftp.h>
#define NAT_HELPER_NAME "tftp"
MODULE_AUTHOR("Magnus Boden <[email protected]>");
MODULE_DESCRIPTION("TFTP NAT helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME);
static struct nf_conntrack_nat_helper nat_helper_tftp =
NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME);
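/* Added note (sketch of intent): a TFTP server replies from a fresh
 * ephemeral port, so the expectation runs in the reply direction;
 * saving the client's original source port in saved_proto lets
 * nf_nat_follow_master set up NAT so the reply reaches the client.
 */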
static unsigned int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_expect *exp)
{
const struct nf_conn *ct = exp->master;
exp->saved_proto.udp.port
= ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, exp->master, "cannot add expectation");
return NF_DROP;
}
return NF_ACCEPT;
}
static void __exit nf_nat_tftp_fini(void)
{
nf_nat_helper_unregister(&nat_helper_tftp);
RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_tftp_init(void)
{
BUG_ON(nf_nat_tftp_hook != NULL);
nf_nat_helper_register(&nat_helper_tftp);
RCU_INIT_POINTER(nf_nat_tftp_hook, help);
return 0;
}
module_init(nf_nat_tftp_init);
module_exit(nf_nat_tftp_fini);
| linux-master | net/netfilter/nf_nat_tftp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_hashlimit - Netfilter module to limit the number of packets per time
* separately for each hashbucket (sourceip/sourceport/dstip/dstport)
*
* (C) 2003-2004 by Harald Welte <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*
* Development of this code was funded by Astaro AG, http://www.astaro.com/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <uapi/linux/netfilter/xt_hashlimit.h>
#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
XT_HASHLIMIT_RATE_MATCH)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
struct hashlimit_net {
struct hlist_head htables;
struct proc_dir_entry *ipt_hashlimit;
struct proc_dir_entry *ip6t_hashlimit;
};
static unsigned int hashlimit_net_id;
static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
return net_generic(net, hashlimit_net_id);
}
/* need to declare this at the top */
static const struct seq_operations dl_seq_ops_v2;
static const struct seq_operations dl_seq_ops_v1;
static const struct seq_operations dl_seq_ops;
/* hash table crap */
struct dsthash_dst {
union {
struct {
__be32 src;
__be32 dst;
} ip;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
struct {
__be32 src[4];
__be32 dst[4];
} ip6;
#endif
};
__be16 src_port;
__be16 dst_port;
};
struct dsthash_ent {
/* static / read-only parts in the beginning */
struct hlist_node node;
struct dsthash_dst dst;
/* modified structure members in the end */
spinlock_t lock;
unsigned long expires; /* precalculated expiry time */
struct {
unsigned long prev; /* last modification */
union {
struct {
u_int64_t credit;
u_int64_t credit_cap;
u_int64_t cost;
};
struct {
u_int32_t interval, prev_window;
u_int64_t current_rate;
u_int64_t rate;
int64_t burst;
};
};
} rateinfo;
struct rcu_head rcu;
};
struct xt_hashlimit_htable {
struct hlist_node node; /* global list of all htables */
refcount_t use;
u_int8_t family;
bool rnd_initialized;
struct hashlimit_cfg3 cfg; /* config */
/* used internally */
spinlock_t lock; /* lock for list_head */
u_int32_t rnd; /* random seed for hash */
unsigned int count; /* number entries in table */
struct delayed_work gc_work;
/* seq_file stuff */
struct proc_dir_entry *pde;
const char *name;
struct net *net;
struct hlist_head hash[]; /* hashtable itself */
};
static int
cfg_copy(struct hashlimit_cfg3 *to, const void *from, int revision)
{
if (revision == 1) {
struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from;
to->mode = cfg->mode;
to->avg = cfg->avg;
to->burst = cfg->burst;
to->size = cfg->size;
to->max = cfg->max;
to->gc_interval = cfg->gc_interval;
to->expire = cfg->expire;
to->srcmask = cfg->srcmask;
to->dstmask = cfg->dstmask;
} else if (revision == 2) {
struct hashlimit_cfg2 *cfg = (struct hashlimit_cfg2 *)from;
to->mode = cfg->mode;
to->avg = cfg->avg;
to->burst = cfg->burst;
to->size = cfg->size;
to->max = cfg->max;
to->gc_interval = cfg->gc_interval;
to->expire = cfg->expire;
to->srcmask = cfg->srcmask;
to->dstmask = cfg->dstmask;
} else if (revision == 3) {
memcpy(to, from, sizeof(struct hashlimit_cfg3));
} else {
return -EINVAL;
}
return 0;
}
static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;
static inline bool dst_cmp(const struct dsthash_ent *ent,
const struct dsthash_dst *b)
{
return !memcmp(&ent->dst, b, sizeof(ent->dst));
}
static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
u_int32_t hash = jhash2((const u32 *)dst,
sizeof(*dst)/sizeof(u32),
ht->rnd);
/*
 * Instead of returning hash % ht->cfg.size (implying a divide)
 * we return the high 32 bits of (hash * ht->cfg.size), which
 * gives results in [0, cfg.size - 1] with the same hash
 * distribution but uses a multiply, less expensive than a divide.
 */
return reciprocal_scale(hash, ht->cfg.size);
}
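/* Added note: reciprocal_scale(hash, n) evaluates ((u64)hash * n) >> 32,
 * mapping the full 32-bit hash range onto [0, n - 1] with a single
 * multiply, exactly as the comment above describes.
 */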
static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
const struct dsthash_dst *dst)
{
struct dsthash_ent *ent;
u_int32_t hash = hash_dst(ht, dst);
if (!hlist_empty(&ht->hash[hash])) {
hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
if (dst_cmp(ent, dst)) {
spin_lock(&ent->lock);
return ent;
}
}
return NULL;
}
/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
const struct dsthash_dst *dst, bool *race)
{
struct dsthash_ent *ent;
spin_lock(&ht->lock);
/* Two or more packets may race to create the same entry in the
 * hashtable; double-check whether this packet lost the race.
 */
ent = dsthash_find(ht, dst);
if (ent != NULL) {
spin_unlock(&ht->lock);
*race = true;
return ent;
}
/* initialize hash with random val at the time we allocate
* the first hashtable entry */
if (unlikely(!ht->rnd_initialized)) {
get_random_bytes(&ht->rnd, sizeof(ht->rnd));
ht->rnd_initialized = true;
}
if (ht->cfg.max && ht->count >= ht->cfg.max) {
/* FIXME: do something. question is what.. */
net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
ent = NULL;
} else
ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
if (ent) {
memcpy(&ent->dst, dst, sizeof(ent->dst));
spin_lock_init(&ent->lock);
spin_lock(&ent->lock);
hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
ht->count++;
}
spin_unlock(&ht->lock);
return ent;
}
static void dsthash_free_rcu(struct rcu_head *head)
{
struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);
kmem_cache_free(hashlimit_cachep, ent);
}
static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
hlist_del_rcu(&ent->node);
call_rcu(&ent->rcu, dsthash_free_rcu);
ht->count--;
}
static void htable_gc(struct work_struct *work);
static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
const char *name, u_int8_t family,
struct xt_hashlimit_htable **out_hinfo,
int revision)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
const struct seq_operations *ops;
unsigned int size, i;
unsigned long nr_pages = totalram_pages();
int ret;
if (cfg->size) {
size = cfg->size;
} else {
size = (nr_pages << PAGE_SHIFT) / 16384 /
sizeof(struct hlist_head);
if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
size = 8192;
if (size < 16)
size = 16;
}
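/* Illustrative sizing (added, assuming 4 KiB pages and 8-byte
 * hlist_heads): a 512 MiB machine gets 536870912 / 16384 / 8 = 4096
 * buckets, and anything above 1 GiB of RAM is clamped to 8192.
 */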
/* FIXME: don't use vmalloc() here or anywhere else -HW */
hinfo = vmalloc(struct_size(hinfo, hash, size));
if (hinfo == NULL)
return -ENOMEM;
*out_hinfo = hinfo;
/* copy match config into hashtable config */
ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
if (ret) {
vfree(hinfo);
return ret;
}
hinfo->cfg.size = size;
if (hinfo->cfg.max == 0)
hinfo->cfg.max = 8 * hinfo->cfg.size;
else if (hinfo->cfg.max < hinfo->cfg.size)
hinfo->cfg.max = hinfo->cfg.size;
for (i = 0; i < hinfo->cfg.size; i++)
INIT_HLIST_HEAD(&hinfo->hash[i]);
refcount_set(&hinfo->use, 1);
hinfo->count = 0;
hinfo->family = family;
hinfo->rnd_initialized = false;
hinfo->name = kstrdup(name, GFP_KERNEL);
if (!hinfo->name) {
vfree(hinfo);
return -ENOMEM;
}
spin_lock_init(&hinfo->lock);
switch (revision) {
case 1:
ops = &dl_seq_ops_v1;
break;
case 2:
ops = &dl_seq_ops_v2;
break;
default:
ops = &dl_seq_ops;
}
hinfo->pde = proc_create_seq_data(name, 0,
(family == NFPROTO_IPV4) ?
hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
ops, hinfo);
if (hinfo->pde == NULL) {
kfree(hinfo->name);
vfree(hinfo);
return -ENOMEM;
}
hinfo->net = net;
INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
msecs_to_jiffies(hinfo->cfg.gc_interval));
hlist_add_head(&hinfo->node, &hashlimit_net->htables);
return 0;
}
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
{
unsigned int i;
for (i = 0; i < ht->cfg.size; i++) {
struct dsthash_ent *dh;
struct hlist_node *n;
spin_lock_bh(&ht->lock);
hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
if (time_after_eq(jiffies, dh->expires) || select_all)
dsthash_free(ht, dh);
}
spin_unlock_bh(&ht->lock);
cond_resched();
}
}
static void htable_gc(struct work_struct *work)
{
struct xt_hashlimit_htable *ht;
ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
htable_selective_cleanup(ht, false);
queue_delayed_work(system_power_efficient_wq,
&ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
}
static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
struct proc_dir_entry *parent;
if (hinfo->family == NFPROTO_IPV4)
parent = hashlimit_net->ipt_hashlimit;
else
parent = hashlimit_net->ip6t_hashlimit;
if (parent != NULL)
remove_proc_entry(hinfo->name, parent);
}
static struct xt_hashlimit_htable *htable_find_get(struct net *net,
const char *name,
u_int8_t family)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
if (!strcmp(name, hinfo->name) &&
hinfo->family == family) {
refcount_inc(&hinfo->use);
return hinfo;
}
}
return NULL;
}
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
hlist_del(&hinfo->node);
htable_remove_proc_entry(hinfo);
mutex_unlock(&hashlimit_mutex);
cancel_delayed_work_sync(&hinfo->gc_work);
htable_selective_cleanup(hinfo, true);
kfree(hinfo->name);
vfree(hinfo);
}
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
* see net/sched/sch_tbf.c in the linux source tree
*/
/* Rusty: This is my (non-mathematically-inclined) understanding of
this algorithm. The `average rate' in jiffies becomes your initial
amount of credit `credit' and the most credit you can ever have
`credit_cap'. The `peak rate' becomes the cost of passing the
test, `cost'.
`prev' tracks the last packet hit: you gain one credit per jiffy.
If your credit balance ever exceeds `credit_cap', the extra credit
is discarded. Every time the match passes, you lose `cost' credits;
if you don't have that many, the test fails.
See Alexey's formal explanation in net/sched/sch_tbf.c.
To get the maximum range, we multiply by this factor (ie. you get N
credits per jiffy). We want to allow a rate as low as 1 per day
(slowest userspace tool allows), which means
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
* shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define _POW2_BELOW64(x) (_POW2_BELOW32(x)|_POW2_BELOW32((x)>>32))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define POW2_BELOW64(x) ((_POW2_BELOW64(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW64(MAX_CPJ)
#define CREDITS_PER_JIFFY_v1 POW2_BELOW32(MAX_CPJ_v1)
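/* Worked example (added, assuming HZ=1000): MAX_CPJ_v1 is
 * 0xFFFFFFFF / (1000 * 86400) = 49, and POW2_BELOW32(49) = 32, so a
 * v1 entry earns 32 credits per jiffy and the counter cannot overflow
 * even at the minimum rate of one match per day.
 */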
/* in byte mode, the lowest possible rate is one packet/second.
* credit_cap is used as a counter that tells us how many times we can
* refill the "credits available" counter when it becomes empty.
*/
#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
static u32 xt_hashlimit_len_to_chunks(u32 len)
{
return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
}
/* Precision saver. */
static u64 user2credits(u64 user, int revision)
{
u64 scale = (revision == 1) ?
XT_HASHLIMIT_SCALE : XT_HASHLIMIT_SCALE_v2;
u64 cpj = (revision == 1) ?
CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;
/* Avoid overflow: divide the constant operands first */
if (scale >= HZ * cpj)
return div64_u64(user, div64_u64(scale, HZ * cpj));
return user * div64_u64(HZ * cpj, scale);
}
static u32 user2credits_byte(u32 user)
{
u64 us = user;
us *= HZ * CREDITS_PER_JIFFY_BYTES;
return (u32) (us >> 32);
}
static u64 user2rate(u64 user)
{
if (user != 0) {
return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
} else {
pr_info_ratelimited("invalid rate from userspace: %llu\n",
user);
return 0;
}
}
static u64 user2rate_bytes(u32 user)
{
u64 r;
r = user ? U32_MAX / user : U32_MAX;
return (r - 1) << XT_HASHLIMIT_BYTE_SHIFT;
}
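/* Added note: in rate-match mode (revision 3), rateinfo_recalc() does
 * not refill credits; once a full sampling interval has elapsed it
 * records in prev_window whether the observed rate exceeded the
 * configured one, then restarts current_rate for the next window.
 */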
static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now,
u32 mode, int revision)
{
unsigned long delta = now - dh->rateinfo.prev;
u64 cap, cpj;
if (delta == 0)
return;
if (revision >= 3 && mode & XT_HASHLIMIT_RATE_MATCH) {
u64 interval = dh->rateinfo.interval * HZ;
if (delta < interval)
return;
dh->rateinfo.prev = now;
dh->rateinfo.prev_window =
((dh->rateinfo.current_rate * interval) >
(delta * dh->rateinfo.rate));
dh->rateinfo.current_rate = 0;
return;
}
dh->rateinfo.prev = now;
if (mode & XT_HASHLIMIT_BYTES) {
u64 tmp = dh->rateinfo.credit;
dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
cap = CREDITS_PER_JIFFY_BYTES * HZ;
if (tmp >= dh->rateinfo.credit) {/* overflow */
dh->rateinfo.credit = cap;
return;
}
} else {
cpj = (revision == 1) ?
CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;
dh->rateinfo.credit += delta * cpj;
cap = dh->rateinfo.credit_cap;
}
if (dh->rateinfo.credit > cap)
dh->rateinfo.credit = cap;
}
static void rateinfo_init(struct dsthash_ent *dh,
struct xt_hashlimit_htable *hinfo, int revision)
{
dh->rateinfo.prev = jiffies;
if (revision >= 3 && hinfo->cfg.mode & XT_HASHLIMIT_RATE_MATCH) {
dh->rateinfo.prev_window = 0;
dh->rateinfo.current_rate = 0;
if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
dh->rateinfo.rate =
user2rate_bytes((u32)hinfo->cfg.avg);
if (hinfo->cfg.burst)
dh->rateinfo.burst =
hinfo->cfg.burst * dh->rateinfo.rate;
else
dh->rateinfo.burst = dh->rateinfo.rate;
} else {
dh->rateinfo.rate = user2rate(hinfo->cfg.avg);
dh->rateinfo.burst =
hinfo->cfg.burst + dh->rateinfo.rate;
}
dh->rateinfo.interval = hinfo->cfg.interval;
} else if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
dh->rateinfo.credit_cap = hinfo->cfg.burst;
} else {
dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
hinfo->cfg.burst, revision);
dh->rateinfo.cost = user2credits(hinfo->cfg.avg, revision);
dh->rateinfo.credit_cap = dh->rateinfo.credit;
}
}
static inline __be32 maskl(__be32 a, unsigned int l)
{
return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}
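/* Added example: maskl() keeps the top l bits of a big-endian address,
 * so with l = 24 the address 192.168.37.5 becomes 192.168.37.0, i.e.
 * its /24 prefix.
 */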
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
switch (p) {
case 0 ... 31:
i[0] = maskl(i[0], p);
i[1] = i[2] = i[3] = 0;
break;
case 32 ... 63:
i[1] = maskl(i[1], p - 32);
i[2] = i[3] = 0;
break;
case 64 ... 95:
i[2] = maskl(i[2], p - 64);
i[3] = 0;
break;
case 96 ... 127:
i[3] = maskl(i[3], p - 96);
break;
case 128:
break;
}
}
#endif
static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
struct dsthash_dst *dst,
const struct sk_buff *skb, unsigned int protoff)
{
__be16 _ports[2], *ports;
u8 nexthdr;
int poff;
memset(dst, 0, sizeof(*dst));
switch (hinfo->family) {
case NFPROTO_IPV4:
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
dst->ip.dst = maskl(ip_hdr(skb)->daddr,
hinfo->cfg.dstmask);
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
dst->ip.src = maskl(ip_hdr(skb)->saddr,
hinfo->cfg.srcmask);
if (!(hinfo->cfg.mode &
(XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
return 0;
nexthdr = ip_hdr(skb)->protocol;
break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
{
__be16 frag_off;
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
sizeof(dst->ip6.dst));
hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
}
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
sizeof(dst->ip6.src));
hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
}
if (!(hinfo->cfg.mode &
(XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
return 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
if ((int)protoff < 0)
return -1;
break;
}
#endif
default:
BUG();
return 0;
}
poff = proto_ports_offset(nexthdr);
if (poff >= 0) {
ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
&_ports);
} else {
_ports[0] = _ports[1] = 0;
ports = _ports;
}
if (!ports)
return -1;
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
dst->src_port = ports[0];
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
dst->dst_port = ports[1];
return 0;
}
static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
{
u64 tmp = xt_hashlimit_len_to_chunks(len);
tmp = tmp * dh->rateinfo.cost;
if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
tmp = CREDITS_PER_JIFFY_BYTES * HZ;
if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
dh->rateinfo.credit_cap--;
dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
}
return (u32) tmp;
}
static bool
hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
struct xt_hashlimit_htable *hinfo,
const struct hashlimit_cfg3 *cfg, int revision)
{
unsigned long now = jiffies;
struct dsthash_ent *dh;
struct dsthash_dst dst;
bool race = false;
u64 cost;
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
local_bh_disable();
dh = dsthash_find(hinfo, &dst);
if (dh == NULL) {
dh = dsthash_alloc_init(hinfo, &dst, &race);
if (dh == NULL) {
local_bh_enable();
goto hotdrop;
} else if (race) {
/* Already got an entry, update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
} else {
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
rateinfo_init(dh, hinfo, revision);
}
} else {
/* update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
}
if (cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
cost = (cfg->mode & XT_HASHLIMIT_BYTES) ? skb->len : 1;
dh->rateinfo.current_rate += cost;
if (!dh->rateinfo.prev_window &&
(dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
spin_unlock(&dh->lock);
local_bh_enable();
return !(cfg->mode & XT_HASHLIMIT_INVERT);
} else {
goto overlimit;
}
}
if (cfg->mode & XT_HASHLIMIT_BYTES)
cost = hashlimit_byte_cost(skb->len, dh);
else
cost = dh->rateinfo.cost;
if (dh->rateinfo.credit >= cost) {
/* below the limit */
dh->rateinfo.credit -= cost;
spin_unlock(&dh->lock);
local_bh_enable();
return !(cfg->mode & XT_HASHLIMIT_INVERT);
}
overlimit:
spin_unlock(&dh->lock);
local_bh_enable();
/* default match is underlimit - so over the limit, we need to invert */
return cfg->mode & XT_HASHLIMIT_INVERT;
hotdrop:
par->hotdrop = true;
return false;
}
static bool
hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
struct xt_hashlimit_htable *hinfo = info->hinfo;
struct hashlimit_cfg3 cfg = {};
int ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
if (ret)
return ret;
return hashlimit_mt_common(skb, par, hinfo, &cfg, 1);
}
static bool
hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
struct xt_hashlimit_htable *hinfo = info->hinfo;
struct hashlimit_cfg3 cfg = {};
int ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
if (ret)
return ret;
return hashlimit_mt_common(skb, par, hinfo, &cfg, 2);
}
static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
struct xt_hashlimit_htable *hinfo = info->hinfo;
return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
}
#define HASHLIMIT_MAX_SIZE 1048576
static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
struct xt_hashlimit_htable **hinfo,
struct hashlimit_cfg3 *cfg,
const char *name, int revision)
{
struct net *net = par->net;
int ret;
if (cfg->gc_interval == 0 || cfg->expire == 0)
return -EINVAL;
if (cfg->size > HASHLIMIT_MAX_SIZE) {
cfg->size = HASHLIMIT_MAX_SIZE;
pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
}
if (cfg->max > HASHLIMIT_MAX_SIZE) {
cfg->max = HASHLIMIT_MAX_SIZE;
pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
}
if (par->family == NFPROTO_IPV4) {
if (cfg->srcmask > 32 || cfg->dstmask > 32)
return -EINVAL;
} else {
if (cfg->srcmask > 128 || cfg->dstmask > 128)
return -EINVAL;
}
if (cfg->mode & ~XT_HASHLIMIT_ALL) {
pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
cfg->mode);
return -EINVAL;
}
/* Check for overflow. */
if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
if (cfg->avg == 0 || cfg->avg > U32_MAX) {
pr_info_ratelimited("invalid rate\n");
return -ERANGE;
}
if (cfg->interval == 0) {
pr_info_ratelimited("invalid interval\n");
return -EINVAL;
}
} else if (cfg->mode & XT_HASHLIMIT_BYTES) {
if (user2credits_byte(cfg->avg) == 0) {
pr_info_ratelimited("overflow, rate too high: %llu\n",
cfg->avg);
return -EINVAL;
}
} else if (cfg->burst == 0 ||
user2credits(cfg->avg * cfg->burst, revision) <
user2credits(cfg->avg, revision)) {
pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
cfg->avg, cfg->burst);
return -ERANGE;
}
mutex_lock(&hashlimit_mutex);
*hinfo = htable_find_get(net, name, par->family);
if (*hinfo == NULL) {
ret = htable_create(net, cfg, name, par->family,
hinfo, revision);
if (ret < 0) {
mutex_unlock(&hashlimit_mutex);
return ret;
}
}
mutex_unlock(&hashlimit_mutex);
return 0;
}
static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
{
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
struct hashlimit_cfg3 cfg = {};
int ret;
ret = xt_check_proc_name(info->name, sizeof(info->name));
if (ret)
return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
if (ret)
return ret;
return hashlimit_mt_check_common(par, &info->hinfo,
&cfg, info->name, 1);
}
static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
{
struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
struct hashlimit_cfg3 cfg = {};
int ret;
ret = xt_check_proc_name(info->name, sizeof(info->name));
if (ret)
return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
if (ret)
return ret;
return hashlimit_mt_check_common(par, &info->hinfo,
&cfg, info->name, 2);
}
static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
int ret;
ret = xt_check_proc_name(info->name, sizeof(info->name));
if (ret)
return ret;
return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
info->name, 3);
}
static void hashlimit_mt_destroy_v2(const struct xt_mtdtor_param *par)
{
const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
htable_put(info->hinfo);
}
static void hashlimit_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
htable_put(info->hinfo);
}
static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
htable_put(info->hinfo);
}
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
{
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV4,
.match = hashlimit_mt_v1,
.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
.usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
.checkentry = hashlimit_mt_check_v1,
.destroy = hashlimit_mt_destroy_v1,
.me = THIS_MODULE,
},
{
.name = "hashlimit",
.revision = 2,
.family = NFPROTO_IPV4,
.match = hashlimit_mt_v2,
.matchsize = sizeof(struct xt_hashlimit_mtinfo2),
.usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
.checkentry = hashlimit_mt_check_v2,
.destroy = hashlimit_mt_destroy_v2,
.me = THIS_MODULE,
},
{
.name = "hashlimit",
.revision = 3,
.family = NFPROTO_IPV4,
.match = hashlimit_mt,
.matchsize = sizeof(struct xt_hashlimit_mtinfo3),
.usersize = offsetof(struct xt_hashlimit_mtinfo3, hinfo),
.checkentry = hashlimit_mt_check,
.destroy = hashlimit_mt_destroy,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV6,
.match = hashlimit_mt_v1,
.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
.usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
.checkentry = hashlimit_mt_check_v1,
.destroy = hashlimit_mt_destroy_v1,
.me = THIS_MODULE,
},
{
.name = "hashlimit",
.revision = 2,
.family = NFPROTO_IPV6,
.match = hashlimit_mt_v2,
.matchsize = sizeof(struct xt_hashlimit_mtinfo2),
.usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
.checkentry = hashlimit_mt_check_v2,
.destroy = hashlimit_mt_destroy_v2,
.me = THIS_MODULE,
},
{
.name = "hashlimit",
.revision = 3,
.family = NFPROTO_IPV6,
.match = hashlimit_mt,
.matchsize = sizeof(struct xt_hashlimit_mtinfo3),
.usersize = offsetof(struct xt_hashlimit_mtinfo3, hinfo),
.checkentry = hashlimit_mt_check,
.destroy = hashlimit_mt_destroy,
.me = THIS_MODULE,
},
#endif
};
/* PROC stuff */
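/* Added note: the seq_file cursor is a heap-allocated bucket index.
 * dl_seq_start() takes the htable lock and allocates it, dl_seq_next()
 * advances it, and dl_seq_stop() frees it and drops the lock.
 */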
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
__acquires(htable->lock)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket;
spin_lock_bh(&htable->lock);
if (*pos >= htable->cfg.size)
return NULL;
bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
if (!bucket)
return ERR_PTR(-ENOMEM);
*bucket = *pos;
return bucket;
}
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket = v;
*pos = ++(*bucket);
if (*pos >= htable->cfg.size) {
kfree(v);
return NULL;
}
return bucket;
}
static void dl_seq_stop(struct seq_file *s, void *v)
__releases(htable->lock)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket = v;
if (!IS_ERR(bucket))
kfree(bucket);
spin_unlock_bh(&htable->lock);
}
static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
switch (family) {
case NFPROTO_IPV4:
seq_printf(s, "%ld %pI4:%u->%pI4:%u %llu %llu %llu\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip.src,
ntohs(ent->dst.src_port),
&ent->dst.ip.dst,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
seq_printf(s, "%ld %pI6:%u->%pI6:%u %llu %llu %llu\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip6.src,
ntohs(ent->dst.src_port),
&ent->dst.ip6.dst,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
break;
#endif
default:
BUG();
}
}
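/* For reference, a line emitted by the IPv4 branch above looks like
* (values illustrative):
*
*   29 192.168.1.10:5000->10.0.0.1:53 96000 96000 32000
*
* i.e. seconds until expiry, src:sport->dst:dport, then the credit,
* credit_cap and cost fields from the rate info.
*/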
static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
rateinfo_recalc(ent, jiffies, ht->cfg.mode, 2);
dl_seq_print(ent, family, s);
spin_unlock(&ent->lock);
return seq_has_overflowed(s);
}
static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
rateinfo_recalc(ent, jiffies, ht->cfg.mode, 1);
dl_seq_print(ent, family, s);
spin_unlock(&ent->lock);
return seq_has_overflowed(s);
}
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
rateinfo_recalc(ent, jiffies, ht->cfg.mode, 3);
dl_seq_print(ent, family, s);
spin_unlock(&ent->lock);
return seq_has_overflowed(s);
}
static int dl_seq_show_v2(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
if (!hlist_empty(&htable->hash[*bucket])) {
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
if (dl_seq_real_show_v2(ent, htable->family, s))
return -1;
}
return 0;
}
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
if (!hlist_empty(&htable->hash[*bucket])) {
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
if (dl_seq_real_show_v1(ent, htable->family, s))
return -1;
}
return 0;
}
static int dl_seq_show(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
if (!hlist_empty(&htable->hash[*bucket])) {
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
if (dl_seq_real_show(ent, htable->family, s))
return -1;
}
return 0;
}
static const struct seq_operations dl_seq_ops_v1 = {
.start = dl_seq_start,
.next = dl_seq_next,
.stop = dl_seq_stop,
.show = dl_seq_show_v1
};
static const struct seq_operations dl_seq_ops_v2 = {
.start = dl_seq_start,
.next = dl_seq_next,
.stop = dl_seq_stop,
.show = dl_seq_show_v2
};
static const struct seq_operations dl_seq_ops = {
.start = dl_seq_start,
.next = dl_seq_next,
.stop = dl_seq_stop,
.show = dl_seq_show
};
static int __net_init hashlimit_proc_net_init(struct net *net)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
if (!hashlimit_net->ipt_hashlimit)
return -ENOMEM;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
if (!hashlimit_net->ip6t_hashlimit) {
remove_proc_entry("ipt_hashlimit", net->proc_net);
return -ENOMEM;
}
#endif
return 0;
}
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
struct xt_hashlimit_htable *hinfo;
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
/* hashlimit_net_exit() is called before hashlimit_mt_destroy().
* Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
* entries are empty before trying to remove them.
*/
mutex_lock(&hashlimit_mutex);
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
htable_remove_proc_entry(hinfo);
hashlimit_net->ipt_hashlimit = NULL;
hashlimit_net->ip6t_hashlimit = NULL;
mutex_unlock(&hashlimit_mutex);
remove_proc_entry("ipt_hashlimit", net->proc_net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
remove_proc_entry("ip6t_hashlimit", net->proc_net);
#endif
}
static int __net_init hashlimit_net_init(struct net *net)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
INIT_HLIST_HEAD(&hashlimit_net->htables);
return hashlimit_proc_net_init(net);
}
static void __net_exit hashlimit_net_exit(struct net *net)
{
hashlimit_proc_net_exit(net);
}
static struct pernet_operations hashlimit_net_ops = {
.init = hashlimit_net_init,
.exit = hashlimit_net_exit,
.id = &hashlimit_net_id,
.size = sizeof(struct hashlimit_net),
};
static int __init hashlimit_mt_init(void)
{
int err;
err = register_pernet_subsys(&hashlimit_net_ops);
if (err < 0)
return err;
err = xt_register_matches(hashlimit_mt_reg,
ARRAY_SIZE(hashlimit_mt_reg));
if (err < 0)
goto err1;
err = -ENOMEM;
hashlimit_cachep = kmem_cache_create("xt_hashlimit",
sizeof(struct dsthash_ent), 0, 0,
NULL);
if (!hashlimit_cachep) {
pr_warn("unable to create slab cache\n");
goto err2;
}
return 0;
err2:
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
unregister_pernet_subsys(&hashlimit_net_ops);
return err;
}
static void __exit hashlimit_mt_exit(void)
{
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
unregister_pernet_subsys(&hashlimit_net_ops);
rcu_barrier();
kmem_cache_destroy(hashlimit_cachep);
}
module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);
| linux-master | net/netfilter/xt_hashlimit.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/module.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_socket.h>
#include <net/inet_sock.h>
#include <net/tcp.h>
struct nft_socket {
enum nft_socket_keys key:8;
u8 level;
u8 len;
union {
u8 dreg;
};
};
static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
struct nft_regs *regs, struct sock *sk,
u32 *dest)
{
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
nft_reg_store8(dest, inet_sk(sk)->inet_rcv_saddr == 0);
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
nft_reg_store8(dest, ipv6_addr_any(&sk->sk_v6_rcv_saddr));
break;
#endif
default:
regs->verdict.code = NFT_BREAK;
return;
}
}
#ifdef CONFIG_SOCK_CGROUP_DATA
static noinline bool
nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
{
struct cgroup *cgrp;
u64 cgid;
if (!sk_fullsock(sk))
return false;
cgrp = cgroup_ancestor(sock_cgroup_ptr(&sk->sk_cgrp_data), level);
if (!cgrp)
return false;
cgid = cgroup_id(cgrp);
memcpy(dest, &cgid, sizeof(u64));
return true;
}
#endif
static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
{
const struct net_device *indev = nft_in(pkt);
const struct sk_buff *skb = pkt->skb;
struct sock *sk = NULL;
if (!indev)
return NULL;
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev);
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev);
break;
#endif
default:
WARN_ON_ONCE(1);
break;
}
return sk;
}
static void nft_socket_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_socket *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
struct sock *sk = skb->sk;
u32 *dest = &regs->data[priv->dreg];
if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
sk = NULL;
if (!sk)
sk = nft_socket_do_lookup(pkt);
if (!sk) {
regs->verdict.code = NFT_BREAK;
return;
}
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
nft_reg_store8(dest, inet_sk_transparent(sk));
break;
case NFT_SOCKET_MARK:
if (sk_fullsock(sk)) {
*dest = READ_ONCE(sk->sk_mark);
} else {
regs->verdict.code = NFT_BREAK;
return;
}
break;
case NFT_SOCKET_WILDCARD:
if (!sk_fullsock(sk)) {
regs->verdict.code = NFT_BREAK;
return;
}
nft_socket_wildcard(pkt, regs, sk, dest);
break;
#ifdef CONFIG_SOCK_CGROUP_DATA
case NFT_SOCKET_CGROUPV2:
if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
regs->verdict.code = NFT_BREAK;
return;
}
break;
#endif
default:
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
}
if (sk != skb->sk)
sock_gen_put(sk);
}
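/* Illustrative nft usage (assumed typical nftables syntax, not part of
* this file): match packets owned by a transparent socket and mark them:
*
*   nft add rule inet mangle prerouting socket transparent 1 \
*       meta mark set 1
*/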
static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
[NFTA_SOCKET_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_SOCKET_DREG] = { .type = NLA_U32 },
[NFTA_SOCKET_LEVEL] = NLA_POLICY_MAX(NLA_BE32, 255),
};
static int nft_socket_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_socket *priv = nft_expr_priv(expr);
unsigned int len;
if (!tb[NFTA_SOCKET_DREG] || !tb[NFTA_SOCKET_KEY])
return -EINVAL;
switch(ctx->family) {
case NFPROTO_IPV4:
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
#endif
case NFPROTO_INET:
break;
default:
return -EOPNOTSUPP;
}
priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY]));
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
case NFT_SOCKET_WILDCARD:
len = sizeof(u8);
break;
case NFT_SOCKET_MARK:
len = sizeof(u32);
break;
#ifdef CONFIG_CGROUPS
case NFT_SOCKET_CGROUPV2: {
unsigned int level;
if (!tb[NFTA_SOCKET_LEVEL])
return -EINVAL;
level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL]));
if (level > 255)
return -EOPNOTSUPP;
priv->level = level;
len = sizeof(u64);
break;
}
#endif
default:
return -EOPNOTSUPP;
}
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
static int nft_socket_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_socket *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
return -1;
if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
return -1;
if (priv->key == NFT_SOCKET_CGROUPV2 &&
nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
return -1;
return 0;
}
static bool nft_socket_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_socket *priv = nft_expr_priv(expr);
const struct nft_socket *socket;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
socket = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->key != socket->key ||
priv->dreg != socket->dreg ||
priv->level != socket->level) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
static int nft_socket_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT));
}
static struct nft_expr_type nft_socket_type;
static const struct nft_expr_ops nft_socket_ops = {
.type = &nft_socket_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_socket)),
.eval = nft_socket_eval,
.init = nft_socket_init,
.dump = nft_socket_dump,
.validate = nft_socket_validate,
.reduce = nft_socket_reduce,
};
static struct nft_expr_type nft_socket_type __read_mostly = {
.name = "socket",
.ops = &nft_socket_ops,
.policy = nft_socket_policy,
.maxattr = NFTA_SOCKET_MAX,
.owner = THIS_MODULE,
};
static int __init nft_socket_module_init(void)
{
return nft_register_expr(&nft_socket_type);
}
static void __exit nft_socket_module_exit(void)
{
nft_unregister_expr(&nft_socket_type);
}
module_init(nft_socket_module_init);
module_exit(nft_socket_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Máté Eckl");
MODULE_DESCRIPTION("nf_tables socket match module");
MODULE_ALIAS_NFT_EXPR("socket");
| linux-master | net/netfilter/nft_socket.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SNMP service broadcast connection tracking helper
*
* (c) 2011 Jiri Olsa <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_snmp.h>
#define SNMP_PORT 161
MODULE_AUTHOR("Jiri Olsa <[email protected]>");
MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFCT_HELPER("snmp");
static unsigned int timeout __read_mostly = 30;
module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
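/* Illustrative: the timeout can be set at module load time, e.g.
*   modprobe nf_conntrack_snmp timeout=60
*/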
int (*nf_nat_snmp_hook)(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
typeof(nf_nat_snmp_hook) nf_nat_snmp;
nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout);
nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
return nf_nat_snmp(skb, protoff, ct, ctinfo);
return NF_ACCEPT;
}
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
static struct nf_conntrack_helper helper __read_mostly = {
.name = "snmp",
.tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.help = snmp_conntrack_help,
.expect_policy = &exp_policy,
};
static int __init nf_conntrack_snmp_init(void)
{
exp_policy.timeout = timeout;
return nf_conntrack_helper_register(&helper);
}
static void __exit nf_conntrack_snmp_fini(void)
{
nf_conntrack_helper_unregister(&helper);
}
module_init(nf_conntrack_snmp_init);
module_exit(nf_conntrack_snmp_fini);
| linux-master | net/netfilter/nf_conntrack_snmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* Copyright (c) 2011 Patrick McHardy <[email protected]>
*
* Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
* NAT funded by Astaro.
*/
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>
static unsigned int
redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
return nf_nat_redirect_ipv6(skb, par->targinfo, xt_hooknum(par));
}
static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
if (range->flags & NF_NAT_RANGE_MAP_IPS)
return -EINVAL;
return nf_ct_netns_get(par->net, par->family);
}
static void redirect_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static int redirect_tg4_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
if (mr->rangesize != 1) {
pr_debug("bad rangesize %u.\n", mr->rangesize);
return -EINVAL;
}
return nf_ct_netns_get(par->net, par->family);
}
static unsigned int
redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
struct nf_nat_range2 range = {
.flags = mr->range[0].flags,
.min_proto = mr->range[0].min,
.max_proto = mr->range[0].max,
};
return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
}
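/* Illustrative userspace usage (assumed typical iptables syntax):
* redirect inbound HTTP to a local proxy on port 3128:
*
*   iptables -t nat -A PREROUTING -p tcp --dport 80 \
*            -j REDIRECT --to-ports 3128
*/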
static struct xt_target redirect_tg_reg[] __read_mostly = {
{
.name = "REDIRECT",
.family = NFPROTO_IPV6,
.revision = 0,
.table = "nat",
.checkentry = redirect_tg6_checkentry,
.destroy = redirect_tg_destroy,
.target = redirect_tg6,
.targetsize = sizeof(struct nf_nat_range),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
.name = "REDIRECT",
.family = NFPROTO_IPV4,
.revision = 0,
.table = "nat",
.target = redirect_tg4,
.checkentry = redirect_tg4_check,
.destroy = redirect_tg_destroy,
.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
};
static int __init redirect_tg_init(void)
{
return xt_register_targets(redirect_tg_reg,
ARRAY_SIZE(redirect_tg_reg));
}
static void __exit redirect_tg_exit(void)
{
xt_unregister_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
}
module_init(redirect_tg_init);
module_exit(redirect_tg_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
MODULE_ALIAS("ip6t_REDIRECT");
MODULE_ALIAS("ipt_REDIRECT");
| linux-master | net/netfilter/xt_REDIRECT.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Transparent proxy support for Linux/iptables
*
* Copyright (C) 2007-2008 BalaBit IT Ltd.
* Author: Krisztian Kovacs
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/inet6_hashtables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
#include <net/netfilter/nf_socket.h>
#include <linux/netfilter/xt_socket.h>
/* "socket" match based redirection (no specific rule)
* ===================================================
*
* There are connections with dynamic endpoints (e.g. FTP data
* connections) that the user is unable to add explicit rules
* for. These are taken care of by a generic "socket" rule. It is
* assumed that the proxy application is trusted to open such
* connections without an explicit iptables rule (except of course
* the generic 'socket' rule). In this case the following sockets
* are matched in preference order:
*
* - match: if there's a fully established connection matching the
* _packet_ tuple
*
* - match: if there's a non-zero bound listener (possibly with a
* non-local address). We don't accept zero-bound listeners, since
* then local services could intercept traffic going through the
* box.
*/
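/* Illustrative usage (assumed typical TPROXY-style setup, not part of
* this file): short-circuit packets already owned by a local
* transparent socket:
*
*   iptables -t mangle -A PREROUTING -p tcp -m socket --transparent \
*            -j MARK --set-mark 1
*/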
static bool
socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
{
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
if (sk) {
bool wildcard;
bool transparent = true;
/* Ignore sockets listening on INADDR_ANY,
* unless XT_SOCKET_NOWILDCARD is set
*/
wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
sk_fullsock(sk) &&
inet_sk(sk)->inet_rcv_saddr == 0);
/* Ignore non-transparent sockets,
* if XT_SOCKET_TRANSPARENT is used
*/
if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = inet_sk_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
transparent && sk_fullsock(sk))
pskb->mark = READ_ONCE(sk->sk_mark);
if (sk != skb->sk)
sock_gen_put(sk);
if (wildcard || !transparent)
sk = NULL;
}
return sk != NULL;
}
static bool
socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
static struct xt_socket_mtinfo1 xt_info_v0 = {
.flags = 0,
};
return socket_match(skb, par, &xt_info_v0);
}
static bool
socket_mt4_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
return socket_match(skb, par, par->matchinfo);
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static bool
socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
if (sk) {
bool wildcard;
bool transparent = true;
/* Ignore sockets listening on INADDR_ANY
* unless XT_SOCKET_NOWILDCARD is set
*/
wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
sk_fullsock(sk) &&
ipv6_addr_any(&sk->sk_v6_rcv_saddr));
/* Ignore non-transparent sockets,
* if XT_SOCKET_TRANSPARENT is used
*/
if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = inet_sk_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
transparent && sk_fullsock(sk))
pskb->mark = READ_ONCE(sk->sk_mark);
if (sk != skb->sk)
sock_gen_put(sk);
if (wildcard || !transparent)
sk = NULL;
}
return sk != NULL;
}
#endif
static int socket_mt_enable_defrag(struct net *net, int family)
{
switch (family) {
case NFPROTO_IPV4:
return nf_defrag_ipv4_enable(net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
return nf_defrag_ipv6_enable(net);
#endif
}
WARN_ONCE(1, "Unknown family %d\n", family);
return 0;
}
static int socket_mt_v1_check(const struct xt_mtchk_param *par)
{
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
int err;
err = socket_mt_enable_defrag(par->net, par->family);
if (err)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V1) {
pr_info_ratelimited("unknown flags 0x%x\n",
info->flags & ~XT_SOCKET_FLAGS_V1);
return -EINVAL;
}
return 0;
}
static int socket_mt_v2_check(const struct xt_mtchk_param *par)
{
const struct xt_socket_mtinfo2 *info = (struct xt_socket_mtinfo2 *) par->matchinfo;
int err;
err = socket_mt_enable_defrag(par->net, par->family);
if (err)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V2) {
pr_info_ratelimited("unknown flags 0x%x\n",
info->flags & ~XT_SOCKET_FLAGS_V2);
return -EINVAL;
}
return 0;
}
static int socket_mt_v3_check(const struct xt_mtchk_param *par)
{
const struct xt_socket_mtinfo3 *info =
(struct xt_socket_mtinfo3 *)par->matchinfo;
int err;
err = socket_mt_enable_defrag(par->net, par->family);
if (err)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V3) {
pr_info_ratelimited("unknown flags 0x%x\n",
info->flags & ~XT_SOCKET_FLAGS_V3);
return -EINVAL;
}
return 0;
}
static void socket_mt_destroy(const struct xt_mtdtor_param *par)
{
if (par->family == NFPROTO_IPV4)
nf_defrag_ipv4_disable(par->net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
nf_defrag_ipv6_disable(par->net);
#endif
}
static struct xt_match socket_mt_reg[] __read_mostly = {
{
.name = "socket",
.revision = 0,
.family = NFPROTO_IPV4,
.match = socket_mt4_v0,
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.name = "socket",
.revision = 1,
.family = NFPROTO_IPV4,
.match = socket_mt4_v1_v2_v3,
.destroy = socket_mt_destroy,
.checkentry = socket_mt_v1_check,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "socket",
.revision = 1,
.family = NFPROTO_IPV6,
.match = socket_mt6_v1_v2_v3,
.checkentry = socket_mt_v1_check,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.destroy = socket_mt_destroy,
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#endif
{
.name = "socket",
.revision = 2,
.family = NFPROTO_IPV4,
.match = socket_mt4_v1_v2_v3,
.checkentry = socket_mt_v2_check,
.destroy = socket_mt_destroy,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "socket",
.revision = 2,
.family = NFPROTO_IPV6,
.match = socket_mt6_v1_v2_v3,
.checkentry = socket_mt_v2_check,
.destroy = socket_mt_destroy,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#endif
{
.name = "socket",
.revision = 3,
.family = NFPROTO_IPV4,
.match = socket_mt4_v1_v2_v3,
.checkentry = socket_mt_v3_check,
.destroy = socket_mt_destroy,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "socket",
.revision = 3,
.family = NFPROTO_IPV6,
.match = socket_mt6_v1_v2_v3,
.checkentry = socket_mt_v3_check,
.destroy = socket_mt_destroy,
.matchsize = sizeof(struct xt_socket_mtinfo1),
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
#endif
};
static int __init socket_mt_init(void)
{
return xt_register_matches(socket_mt_reg, ARRAY_SIZE(socket_mt_reg));
}
static void __exit socket_mt_exit(void)
{
xt_unregister_matches(socket_mt_reg, ARRAY_SIZE(socket_mt_reg));
}
module_init(socket_mt_init);
module_exit(socket_mt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Krisztian Kovacs, Balazs Scheidler");
MODULE_DESCRIPTION("x_tables socket match module");
MODULE_ALIAS("ipt_socket");
MODULE_ALIAS("ip6t_socket");
| linux-master | net/netfilter/xt_socket.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
struct nft_quota {
atomic64_t quota;
unsigned long flags;
atomic64_t *consumed;
};
static inline bool nft_overquota(struct nft_quota *priv,
const struct sk_buff *skb)
{
return atomic64_add_return(skb->len, priv->consumed) >=
atomic64_read(&priv->quota);
}
static inline bool nft_quota_invert(struct nft_quota *priv)
{
return priv->flags & NFT_QUOTA_F_INV;
}
static inline void nft_quota_do_eval(struct nft_quota *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
if (nft_overquota(priv, pkt->skb) ^ nft_quota_invert(priv))
regs->verdict.code = NFT_BREAK;
}
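/* Illustrative nft usage (assumed typical nftables syntax, not part of
* this file):
*
*   nft add rule inet filter forward quota over 500 mbytes drop
*
* As implemented above, the expression issues NFT_BREAK once the byte
* count exceeds the quota; NFT_QUOTA_F_INV (which the "over" keyword
* presumably sets) inverts this so the rule only matches after the
* quota is exceeded.
*/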
static const struct nla_policy nft_quota_policy[NFTA_QUOTA_MAX + 1] = {
[NFTA_QUOTA_BYTES] = { .type = NLA_U64 },
[NFTA_QUOTA_FLAGS] = { .type = NLA_U32 },
[NFTA_QUOTA_CONSUMED] = { .type = NLA_U64 },
};
#define NFT_QUOTA_DEPLETED_BIT 1 /* From NFT_QUOTA_F_DEPLETED. */
static void nft_quota_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_quota *priv = nft_obj_data(obj);
bool overquota;
overquota = nft_overquota(priv, pkt->skb);
if (overquota ^ nft_quota_invert(priv))
regs->verdict.code = NFT_BREAK;
if (overquota &&
!test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC);
}
static int nft_quota_do_init(const struct nlattr * const tb[],
struct nft_quota *priv)
{
unsigned long flags = 0;
u64 quota, consumed = 0;
if (!tb[NFTA_QUOTA_BYTES])
return -EINVAL;
quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES]));
if (quota > S64_MAX)
return -EOVERFLOW;
if (tb[NFTA_QUOTA_CONSUMED]) {
consumed = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_CONSUMED]));
if (consumed > quota)
return -EINVAL;
}
if (tb[NFTA_QUOTA_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS]));
if (flags & ~NFT_QUOTA_F_INV)
return -EINVAL;
if (flags & NFT_QUOTA_F_DEPLETED)
return -EOPNOTSUPP;
}
priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL_ACCOUNT);
if (!priv->consumed)
return -ENOMEM;
atomic64_set(&priv->quota, quota);
priv->flags = flags;
atomic64_set(priv->consumed, consumed);
return 0;
}
static void nft_quota_do_destroy(const struct nft_ctx *ctx,
struct nft_quota *priv)
{
kfree(priv->consumed);
}
static int nft_quota_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_quota *priv = nft_obj_data(obj);
return nft_quota_do_init(tb, priv);
}
static void nft_quota_obj_update(struct nft_object *obj,
struct nft_object *newobj)
{
struct nft_quota *newpriv = nft_obj_data(newobj);
struct nft_quota *priv = nft_obj_data(obj);
u64 newquota;
newquota = atomic64_read(&newpriv->quota);
atomic64_set(&priv->quota, newquota);
priv->flags = newpriv->flags;
}
static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
bool reset)
{
u64 consumed, consumed_cap, quota;
u32 flags = priv->flags;
/* Since we unconditionally increment consumed quota for each packet
* that we see, don't go over the quota boundary in what we send to
* userspace.
*/
consumed = atomic64_read(priv->consumed);
quota = atomic64_read(&priv->quota);
if (consumed >= quota) {
consumed_cap = quota;
flags |= NFT_QUOTA_F_DEPLETED;
} else {
consumed_cap = consumed;
}
if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(quota),
NFTA_QUOTA_PAD) ||
nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
NFTA_QUOTA_PAD) ||
nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
goto nla_put_failure;
if (reset) {
atomic64_sub(consumed, priv->consumed);
clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
}
return 0;
nla_put_failure:
return -1;
}
static int nft_quota_obj_dump(struct sk_buff *skb, struct nft_object *obj,
bool reset)
{
struct nft_quota *priv = nft_obj_data(obj);
return nft_quota_do_dump(skb, priv, reset);
}
static void nft_quota_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_quota *priv = nft_obj_data(obj);
return nft_quota_do_destroy(ctx, priv);
}
static struct nft_object_type nft_quota_obj_type;
static const struct nft_object_ops nft_quota_obj_ops = {
.type = &nft_quota_obj_type,
.size = sizeof(struct nft_quota),
.init = nft_quota_obj_init,
.destroy = nft_quota_obj_destroy,
.eval = nft_quota_obj_eval,
.dump = nft_quota_obj_dump,
.update = nft_quota_obj_update,
};
static struct nft_object_type nft_quota_obj_type __read_mostly = {
.type = NFT_OBJECT_QUOTA,
.ops = &nft_quota_obj_ops,
.maxattr = NFTA_QUOTA_MAX,
.policy = nft_quota_policy,
.owner = THIS_MODULE,
};
static void nft_quota_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_quota *priv = nft_expr_priv(expr);
nft_quota_do_eval(priv, regs, pkt);
}
static int nft_quota_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_quota *priv = nft_expr_priv(expr);
return nft_quota_do_init(tb, priv);
}
static int nft_quota_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_quota *priv = nft_expr_priv(expr);
return nft_quota_do_dump(skb, priv, reset);
}
static void nft_quota_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_quota *priv = nft_expr_priv(expr);
return nft_quota_do_destroy(ctx, priv);
}
static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_quota *priv_dst = nft_expr_priv(dst);
struct nft_quota *priv_src = nft_expr_priv(src);
priv_dst->quota = priv_src->quota;
priv_dst->flags = priv_src->flags;
priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
if (!priv_dst->consumed)
return -ENOMEM;
*priv_dst->consumed = *priv_src->consumed;
return 0;
}
static struct nft_expr_type nft_quota_type;
static const struct nft_expr_ops nft_quota_ops = {
.type = &nft_quota_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_quota)),
.eval = nft_quota_eval,
.init = nft_quota_init,
.destroy = nft_quota_destroy,
.clone = nft_quota_clone,
.dump = nft_quota_dump,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_quota_type __read_mostly = {
.name = "quota",
.ops = &nft_quota_ops,
.policy = nft_quota_policy,
.maxattr = NFTA_QUOTA_MAX,
.flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
static int __init nft_quota_module_init(void)
{
int err;
err = nft_register_obj(&nft_quota_obj_type);
if (err < 0)
return err;
err = nft_register_expr(&nft_quota_type);
if (err < 0)
goto err1;
return 0;
err1:
nft_unregister_obj(&nft_quota_obj_type);
return err;
}
static void __exit nft_quota_module_exit(void)
{
nft_unregister_expr(&nft_quota_type);
nft_unregister_obj(&nft_quota_obj_type);
}
module_init(nft_quota_module_init);
module_exit(nft_quota_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_EXPR("quota");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
MODULE_DESCRIPTION("Netfilter nftables quota module");
| linux-master | net/netfilter/nft_quota.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking protocol helper module for GRE.
*
* GRE is a generic encapsulation protocol, which is generally not
* well suited for NAT, as it has no protocol-specific parts such as
* port numbers.
*
* It has an optional key field, which may help us distinguish two
* connections between the same two hosts.
*
* GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
*
* PPTP is built on top of a modified version of GRE, and has a mandatory
* field called "CallID", which serves us for the same purpose as the key
* field in plain GRE.
*
* Documentation about PPTP can be found in RFC 2637
*
* (C) 2000-2005 by Harald Welte <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_UNREPLIED] = 30*HZ,
[GRE_CT_REPLIED] = 180*HZ,
};
/* used when expectation is added */
static DEFINE_SPINLOCK(keymap_lock);
static inline struct nf_gre_net *gre_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.gre;
}
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
const struct nf_conntrack_tuple *t)
{
return km->tuple.src.l3num == t->src.l3num &&
!memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) &&
!memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) &&
km->tuple.dst.protonum == t->dst.protonum &&
km->tuple.dst.u.all == t->dst.u.all;
}
/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km;
__be16 key = 0;
list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
if (gre_key_cmpfn(km, t)) {
key = km->tuple.src.u.gre.key;
break;
}
}
pr_debug("lookup src key 0x%x for ", key);
nf_ct_dump_tuple(t);
return key;
}
/* add a single keymap entry, associate with specified master ct */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t)
{
struct net *net = nf_ct_net(ct);
struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_ct_gre_keymap **kmp, *km;
kmp = &ct_pptp_info->keymap[dir];
if (*kmp) {
/* check whether it's a retransmission */
list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
if (gre_key_cmpfn(km, t) && km == *kmp)
return 0;
}
pr_debug("trying to override keymap_%s for ct %p\n",
dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
return -EEXIST;
}
km = kmalloc(sizeof(*km), GFP_ATOMIC);
if (!km)
return -ENOMEM;
memcpy(&km->tuple, t, sizeof(*t));
*kmp = km;
pr_debug("adding new entry %p: ", km);
nf_ct_dump_tuple(&km->tuple);
spin_lock_bh(&keymap_lock);
list_add_tail(&km->list, &net_gre->keymap_list);
spin_unlock_bh(&keymap_lock);
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
/* destroy the keymap entries associated with specified master ct */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
enum ip_conntrack_dir dir;
pr_debug("entering for ct %p\n", ct);
spin_lock_bh(&keymap_lock);
for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
if (ct_pptp_info->keymap[dir]) {
pr_debug("removing %p from list\n",
ct_pptp_info->keymap[dir]);
list_del_rcu(&ct_pptp_info->keymap[dir]->list);
kfree_rcu(ct_pptp_info->keymap[dir], rcu);
ct_pptp_info->keymap[dir] = NULL;
}
}
spin_unlock_bh(&keymap_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
/* gre hdr info to tuple */
bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct pptp_gre_header *pgrehdr;
struct pptp_gre_header _pgrehdr;
__be16 srckey;
const struct gre_base_hdr *grehdr;
struct gre_base_hdr _grehdr;
/* first only delinearize old RFC1701 GRE header */
grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
if (!grehdr || (grehdr->flags & GRE_VERSION) != GRE_VERSION_1) {
/* try to behave like "nf_conntrack_proto_generic" */
tuple->src.u.all = 0;
tuple->dst.u.all = 0;
return true;
}
/* PPTP header is variable length, only need up to the call_id field */
pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
if (!pgrehdr)
return true;
if (grehdr->protocol != GRE_PROTO_PPP) {
pr_debug("Unsupported GRE proto(0x%x)\n", ntohs(grehdr->protocol));
return false;
}
tuple->dst.u.gre.key = pgrehdr->call_id;
srckey = gre_keymap_lookup(net, tuple);
tuple->src.u.gre.key = srckey;
return true;
}
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* print private data for conntrack */
static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
seq_printf(s, "timeout=%u, stream_timeout=%u ",
(ct->proto.gre.timeout / HZ),
(ct->proto.gre.stream_timeout / HZ));
}
#endif
static unsigned int *gre_get_timeouts(struct net *net)
{
return gre_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
int nf_conntrack_gre_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
unsigned long status;
if (!nf_ct_is_confirmed(ct)) {
unsigned int *timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = gre_get_timeouts(nf_ct_net(ct));
/* initialize to a sane value. Ideally a conntrack helper
* (e.g. in the case of PPTP) will increase them */
ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
}
status = READ_ONCE(ct->status);
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
if (status & IPS_SEEN_REPLY) {
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.stream_timeout);
/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
if (unlikely((status & IPS_NAT_CLASH)))
return NF_ACCEPT;
/* Also, more likely to be important, and not a probe. */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.timeout);
return NF_ACCEPT;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct nf_gre_net *net_gre = gre_pernet(net);
if (!timeouts)
timeouts = gre_get_timeouts(net);
/* set default timeouts for GRE. */
timeouts[GRE_CT_UNREPLIED] = net_gre->timeouts[GRE_CT_UNREPLIED];
timeouts[GRE_CT_REPLIED] = net_gre->timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
timeouts[GRE_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
}
return 0;
}
static int
gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
htonl(timeouts[GRE_CT_REPLIED] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
[CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_gre_init_net(struct net *net)
{
struct nf_gre_net *net_gre = gre_pernet(net);
int i;
INIT_LIST_HEAD(&net_gre->keymap_list);
for (i = 0; i < GRE_CT_MAX; i++)
net_gre->timeouts[i] = gre_timeouts[i];
}
/* protocol helper struct */
const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = {
.l4proto = IPPROTO_GRE,
.allow_clash = true,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = gre_print_conntrack,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = gre_timeout_nlattr_to_obj,
.obj_to_nlattr = gre_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_GRE_MAX,
.obj_size = sizeof(unsigned int) * GRE_CT_MAX,
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_gre.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Unstable Conntrack Helpers for XDP and TC-BPF hook
*
* These are called from the XDP and SCHED_CLS BPF programs. Note that it is
* allowed to break compatibility for these functions since the interface they
* are exposed through to BPF programs is explicitly unstable.
*/
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/btf_ids.h>
#include <linux/net_namespace.h>
#include <net/xdp.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
/* bpf_ct_opts - Options for CT lookup helpers
*
* Members:
* @netns_id - Specify the network namespace for lookup
* Values:
* BPF_F_CURRENT_NETNS (-1)
* Use namespace associated with ctx (xdp_md, __sk_buff)
* [0, S32_MAX]
* Network Namespace ID
* @error - Out parameter, set for any errors encountered
* Values:
* -EINVAL - Passed NULL for bpf_tuple pointer
* -EINVAL - opts->reserved is not 0
* -EINVAL - netns_id is less than -1
* -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (12)
* -EPROTO - l4proto isn't one of IPPROTO_TCP or IPPROTO_UDP
* -ENONET - No network namespace found for netns_id
* -ENOENT - Conntrack lookup could not find entry for tuple
* -EAFNOSUPPORT - tuple__sz isn't one of sizeof(tuple->ipv4)
* or sizeof(tuple->ipv6)
* @l4proto - Layer 4 protocol
* Values:
* IPPROTO_TCP, IPPROTO_UDP
* @dir - Connection tracking tuple direction.
* @reserved - Reserved member, will be reused for more options in future
* Values:
* 0
*/
struct bpf_ct_opts {
s32 netns_id;
s32 error;
u8 l4proto;
u8 dir;
u8 reserved[2];
};
enum {
NF_BPF_CT_OPTS_SZ = 12,
};
static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple,
u32 tuple_len, u8 protonum, u8 dir,
struct nf_conntrack_tuple *tuple)
{
union nf_inet_addr *src = dir ? &tuple->dst.u3 : &tuple->src.u3;
union nf_inet_addr *dst = dir ? &tuple->src.u3 : &tuple->dst.u3;
union nf_conntrack_man_proto *sport = dir ? (void *)&tuple->dst.u
: &tuple->src.u;
union nf_conntrack_man_proto *dport = dir ? &tuple->src.u
: (void *)&tuple->dst.u;
if (unlikely(protonum != IPPROTO_TCP && protonum != IPPROTO_UDP))
return -EPROTO;
memset(tuple, 0, sizeof(*tuple));
switch (tuple_len) {
case sizeof(bpf_tuple->ipv4):
tuple->src.l3num = AF_INET;
src->ip = bpf_tuple->ipv4.saddr;
sport->tcp.port = bpf_tuple->ipv4.sport;
dst->ip = bpf_tuple->ipv4.daddr;
dport->tcp.port = bpf_tuple->ipv4.dport;
break;
case sizeof(bpf_tuple->ipv6):
tuple->src.l3num = AF_INET6;
memcpy(src->ip6, bpf_tuple->ipv6.saddr, sizeof(bpf_tuple->ipv6.saddr));
sport->tcp.port = bpf_tuple->ipv6.sport;
memcpy(dst->ip6, bpf_tuple->ipv6.daddr, sizeof(bpf_tuple->ipv6.daddr));
dport->tcp.port = bpf_tuple->ipv6.dport;
break;
default:
return -EAFNOSUPPORT;
}
tuple->dst.protonum = protonum;
tuple->dst.dir = dir;
return 0;
}
static struct nf_conn *
__bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
u32 tuple_len, struct bpf_ct_opts *opts, u32 opts_len,
u32 timeout)
{
struct nf_conntrack_tuple otuple, rtuple;
struct nf_conn *ct;
int err;
if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
opts_len != NF_BPF_CT_OPTS_SZ)
return ERR_PTR(-EINVAL);
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
return ERR_PTR(-EINVAL);
err = bpf_nf_ct_tuple_parse(bpf_tuple, tuple_len, opts->l4proto,
IP_CT_DIR_ORIGINAL, &otuple);
if (err < 0)
return ERR_PTR(err);
err = bpf_nf_ct_tuple_parse(bpf_tuple, tuple_len, opts->l4proto,
IP_CT_DIR_REPLY, &rtuple);
if (err < 0)
return ERR_PTR(err);
if (opts->netns_id >= 0) {
net = get_net_ns_by_id(net, opts->netns_id);
if (unlikely(!net))
return ERR_PTR(-ENONET);
}
ct = nf_conntrack_alloc(net, &nf_ct_zone_dflt, &otuple, &rtuple,
GFP_ATOMIC);
if (IS_ERR(ct))
goto out;
memset(&ct->proto, 0, sizeof(ct->proto));
__nf_ct_set_timeout(ct, timeout * HZ);
out:
if (opts->netns_id >= 0)
put_net(net);
return ct;
}
static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
struct bpf_sock_tuple *bpf_tuple,
u32 tuple_len, struct bpf_ct_opts *opts,
u32 opts_len)
{
struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple tuple;
struct nf_conn *ct;
int err;
if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
opts_len != NF_BPF_CT_OPTS_SZ)
return ERR_PTR(-EINVAL);
if (unlikely(opts->l4proto != IPPROTO_TCP && opts->l4proto != IPPROTO_UDP))
return ERR_PTR(-EPROTO);
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
return ERR_PTR(-EINVAL);
err = bpf_nf_ct_tuple_parse(bpf_tuple, tuple_len, opts->l4proto,
IP_CT_DIR_ORIGINAL, &tuple);
if (err < 0)
return ERR_PTR(err);
if (opts->netns_id >= 0) {
net = get_net_ns_by_id(net, opts->netns_id);
if (unlikely(!net))
return ERR_PTR(-ENONET);
}
hash = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
if (opts->netns_id >= 0)
put_net(net);
if (!hash)
return ERR_PTR(-ENOENT);
ct = nf_ct_tuplehash_to_ctrack(hash);
opts->dir = NF_CT_DIRECTION(hash);
return ct;
}
BTF_ID_LIST(btf_nf_conn_ids)
BTF_ID(struct, nf_conn)
BTF_ID(struct, nf_conn___init)
/* Check writes into `struct nf_conn` */
static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size)
{
const struct btf_type *ncit, *nct, *t;
size_t end;
ncit = btf_type_by_id(reg->btf, btf_nf_conn_ids[1]);
nct = btf_type_by_id(reg->btf, btf_nf_conn_ids[0]);
t = btf_type_by_id(reg->btf, reg->btf_id);
if (t != nct && t != ncit) {
bpf_log(log, "only read is supported\n");
return -EACCES;
}
/* `struct nf_conn` and `struct nf_conn___init` have the same layout
* so we are safe to simply merge offset checks here
*/
switch (off) {
#if defined(CONFIG_NF_CONNTRACK_MARK)
case offsetof(struct nf_conn, mark):
end = offsetofend(struct nf_conn, mark);
break;
#endif
default:
bpf_log(log, "no write support to nf_conn at off %d\n", off);
return -EACCES;
}
if (off + size > end) {
bpf_log(log,
"write access at off %d with size %d beyond the member of nf_conn ended at %zu\n",
off, size, end);
return -EACCES;
}
return 0;
}
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in nf_conntrack BTF");
/* bpf_xdp_ct_alloc - Allocate a new CT entry
*
* Parameters:
* @xdp_ctx - Pointer to ctx (xdp_md) in XDP program
* Cannot be NULL
* @bpf_tuple - Pointer to memory representing the tuple to look up
* Cannot be NULL
* @tuple__sz - Length of the tuple structure
* Must be one of sizeof(bpf_tuple->ipv4) or
* sizeof(bpf_tuple->ipv6)
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
struct xdp_buff *ctx = (struct xdp_buff *)xdp_ctx;
struct nf_conn *nfct;
nfct = __bpf_nf_ct_alloc_entry(dev_net(ctx->rxq->dev), bpf_tuple, tuple__sz,
opts, opts__sz, 10);
if (IS_ERR(nfct)) {
if (opts)
opts->error = PTR_ERR(nfct);
return NULL;
}
return (struct nf_conn___init *)nfct;
}
/* bpf_xdp_ct_lookup - Lookup CT entry for the given tuple, and acquire a
* reference to it
*
* Parameters:
* @xdp_ctx - Pointer to ctx (xdp_md) in XDP program
* Cannot be NULL
* @bpf_tuple - Pointer to memory representing the tuple to look up
* Cannot be NULL
* @tuple__sz - Length of the tuple structure
* Must be one of sizeof(bpf_tuple->ipv4) or
* sizeof(bpf_tuple->ipv6)
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
struct xdp_buff *ctx = (struct xdp_buff *)xdp_ctx;
struct net *caller_net;
struct nf_conn *nfct;
caller_net = dev_net(ctx->rxq->dev);
nfct = __bpf_nf_ct_lookup(caller_net, bpf_tuple, tuple__sz, opts, opts__sz);
if (IS_ERR(nfct)) {
if (opts)
opts->error = PTR_ERR(nfct);
return NULL;
}
return nfct;
}
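/* A minimal BPF-side sketch of the lookup/release pairing (illustrative,
* assuming the usual kfunc declarations are available to the program):
*
*	struct bpf_ct_opts opts = {
*		.netns_id = BPF_F_CURRENT_NETNS,
*		.l4proto = IPPROTO_TCP,
*	};
*	struct nf_conn *ct;
*
*	ct = bpf_xdp_ct_lookup(ctx, &tuple, sizeof(tuple.ipv4),
*			       &opts, sizeof(opts));
*	if (ct)
*		bpf_ct_release(ct);
*/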
/* bpf_skb_ct_alloc - Allocate a new CT entry
*
* Parameters:
* @skb_ctx - Pointer to ctx (__sk_buff) in TC program
* Cannot be NULL
* @bpf_tuple - Pointer to memory representing the tuple to look up
* Cannot be NULL
* @tuple__sz - Length of the tuple structure
* Must be one of sizeof(bpf_tuple->ipv4) or
* sizeof(bpf_tuple->ipv6)
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct nf_conn *nfct;
struct net *net;
net = skb->dev ? dev_net(skb->dev) : sock_net(skb->sk);
nfct = __bpf_nf_ct_alloc_entry(net, bpf_tuple, tuple__sz, opts, opts__sz, 10);
if (IS_ERR(nfct)) {
if (opts)
opts->error = PTR_ERR(nfct);
return NULL;
}
return (struct nf_conn___init *)nfct;
}
/* bpf_skb_ct_lookup - Lookup CT entry for the given tuple, and acquire a
* reference to it
*
* Parameters:
* @skb_ctx - Pointer to ctx (__sk_buff) in TC program
* Cannot be NULL
* @bpf_tuple - Pointer to memory representing the tuple to look up
* Cannot be NULL
* @tuple__sz - Length of the tuple structure
* Must be one of sizeof(bpf_tuple->ipv4) or
* sizeof(bpf_tuple->ipv6)
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct net *caller_net;
struct nf_conn *nfct;
caller_net = skb->dev ? dev_net(skb->dev) : sock_net(skb->sk);
nfct = __bpf_nf_ct_lookup(caller_net, bpf_tuple, tuple__sz, opts, opts__sz);
if (IS_ERR(nfct)) {
if (opts)
opts->error = PTR_ERR(nfct);
return NULL;
}
return nfct;
}
/* bpf_ct_insert_entry - Add the provided entry into a CT map
*
* This must be invoked for referenced PTR_TO_BTF_ID.
*
* @nfct - Pointer to referenced nf_conn___init object, obtained
* using bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
*/
__bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
{
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
if (!nf_ct_is_confirmed(nfct))
nfct->timeout += nfct_time_stamp;
nfct->status |= IPS_CONFIRMED;
err = nf_conntrack_hash_check_insert(nfct);
if (err < 0) {
nf_conntrack_free(nfct);
return NULL;
}
return nfct;
}
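/* A minimal sketch of the allocate/insert flow (illustrative; opts and
* tuple as in the lookup example above). Note that bpf_ct_insert_entry
* consumes the allocated reference and returns an acquired nf_conn:
*
*	struct nf_conn___init *ct_i;
*	struct nf_conn *ct;
*
*	ct_i = bpf_xdp_ct_alloc(ctx, &tuple, sizeof(tuple.ipv4),
*				&opts, sizeof(opts));
*	if (ct_i) {
*		bpf_ct_set_timeout(ct_i, 30000);
*		ct = bpf_ct_insert_entry(ct_i);
*		if (ct)
*			bpf_ct_release(ct);
*	}
*/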
/* bpf_ct_release - Release acquired nf_conn object
*
* This must be invoked for referenced PTR_TO_BTF_ID; the verifier
* rejects the program if any references remain unreleased in any of
* the explored states.
*
* Parameters:
* @nf_conn - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
*/
__bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
{
nf_ct_put(nfct);
}
/* bpf_ct_set_timeout - Set timeout of allocated nf_conn
*
* Sets the default timeout of a newly allocated nf_conn before insertion.
* This helper must be invoked for refcounted pointer to nf_conn___init.
*
* Parameters:
* @nfct - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @timeout - Timeout in msecs.
*/
__bpf_kfunc void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
{
__nf_ct_set_timeout((struct nf_conn *)nfct, msecs_to_jiffies(timeout));
}
/* bpf_ct_change_timeout - Change timeout of inserted nf_conn
*
* Change the timeout associated with the inserted or looked-up nf_conn.
* This helper must be invoked for refcounted pointer to nf_conn.
*
* Parameters:
* @nfct - Pointer to referenced nf_conn object, obtained using
* bpf_ct_insert_entry, bpf_xdp_ct_lookup, or bpf_skb_ct_lookup.
* @timeout - New timeout in msecs.
*/
__bpf_kfunc int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
{
return __nf_ct_change_timeout(nfct, msecs_to_jiffies(timeout));
}
/* bpf_ct_set_status - Set status field of allocated nf_conn
*
* Set the status field of the newly allocated nf_conn before insertion.
* This must be invoked for referenced PTR_TO_BTF_ID to nf_conn___init.
*
* Parameters:
* @nfct - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @status - New status value.
*/
__bpf_kfunc int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
{
return nf_ct_change_status_common((struct nf_conn *)nfct, status);
}
/* bpf_ct_change_status - Change status of inserted nf_conn
*
* Change the status field of the provided connection tracking entry.
* This must be invoked for referenced PTR_TO_BTF_ID to nf_conn.
*
* Parameters:
* @nfct - Pointer to referenced nf_conn object, obtained using
* bpf_ct_insert_entry, bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
* @status - New status value.
*/
__bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
{
return nf_ct_change_status_common(nfct, status);
}
__diag_pop()
BTF_SET8_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_ct_insert_entry, KF_ACQUIRE | KF_RET_NULL | KF_RELEASE)
BTF_ID_FLAGS(func, bpf_ct_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS)
BTF_SET8_END(nf_ct_kfunc_set)
static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.owner = THIS_MODULE,
.set = &nf_ct_kfunc_set,
};
int register_nf_conntrack_bpf(void)
{
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_kfunc_set);
if (!ret) {
mutex_lock(&nf_conn_btf_access_lock);
nfct_btf_struct_access = _nf_conntrack_btf_struct_access;
mutex_unlock(&nf_conn_btf_access_lock);
}
return ret;
}
void cleanup_nf_conntrack_bpf(void)
{
mutex_lock(&nf_conn_btf_access_lock);
nfct_btf_struct_access = NULL;
mutex_unlock(&nf_conn_btf_access_lock);
}
| linux-master | net/netfilter/nf_conntrack_bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match MAC address parameters. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_mac.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Xtables: MAC address match");
MODULE_ALIAS("ipt_mac");
MODULE_ALIAS("ip6t_mac");
static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_mac_info *info = par->matchinfo;
bool ret;
if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
return false;
if (skb_mac_header(skb) < skb->head)
return false;
if (skb_mac_header(skb) + ETH_HLEN > skb->data)
return false;
ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
ret ^= info->invert;
return ret;
}
static struct xt_match mac_mt_reg __read_mostly = {
.name = "mac",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = mac_mt,
.matchsize = sizeof(struct xt_mac_info),
.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD),
.me = THIS_MODULE,
};
static int __init mac_mt_init(void)
{
return xt_register_match(&mac_mt_reg);
}
static void __exit mac_mt_exit(void)
{
xt_unregister_match(&mac_mt_reg);
}
module_init(mac_mt_init);
module_exit(mac_mt_exit);
| linux-master | net/netfilter/xt_mac.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/inetdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_nat_masquerade.h>
struct masq_dev_work {
struct work_struct work;
struct net *net;
netns_tracker ns_tracker;
union nf_inet_addr addr;
int ifindex;
int (*iter)(struct nf_conn *i, void *data);
};
#define MAX_MASQ_WORKER_COUNT 16
static DEFINE_MUTEX(masq_mutex);
static unsigned int masq_refcnt __read_mostly;
static atomic_t masq_worker_count __read_mostly;
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
const struct net_device *out)
{
struct nf_conn *ct;
struct nf_conn_nat *nat;
enum ip_conntrack_info ctinfo;
struct nf_nat_range2 newrange;
const struct rtable *rt;
__be32 newsrc, nh;
WARN_ON(hooknum != NF_INET_POST_ROUTING);
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED_REPLY)));
/* Source address is 0.0.0.0 - locally generated packet that is
* probably not supposed to be masqueraded.
*/
if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
return NF_ACCEPT;
rt = skb_rtable(skb);
nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
if (!newsrc) {
pr_info("%s ate my IP address\n", out->name);
return NF_DROP;
}
nat = nf_ct_nat_ext_add(ct);
if (nat)
nat->masq_index = out->ifindex;
/* Transfer from original range. */
memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr.ip = newsrc;
newrange.max_addr.ip = newsrc;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
/* Hand modified range to generic setup. */
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
static void iterate_cleanup_work(struct work_struct *work)
{
struct nf_ct_iter_data iter_data = {};
struct masq_dev_work *w;
w = container_of(work, struct masq_dev_work, work);
iter_data.net = w->net;
iter_data.data = (void *)w;
nf_ct_iterate_cleanup_net(w->iter, &iter_data);
put_net_track(w->net, &w->ns_tracker);
kfree(w);
atomic_dec(&masq_worker_count);
module_put(THIS_MODULE);
}
/* Iterate conntrack table in the background and remove conntrack entries
* that use the device/address being removed.
*
 * In case too many work items have been queued already or memory allocation
 * fails, iteration is skipped; conntrack entries will time out eventually.
*/
static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
int ifindex,
int (*iter)(struct nf_conn *i, void *data),
gfp_t gfp_flags)
{
struct masq_dev_work *w;
if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
return;
net = maybe_get_net(net);
if (!net)
return;
if (!try_module_get(THIS_MODULE))
goto err_module;
w = kzalloc(sizeof(*w), gfp_flags);
if (w) {
/* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
atomic_inc(&masq_worker_count);
INIT_WORK(&w->work, iterate_cleanup_work);
w->ifindex = ifindex;
w->net = net;
netns_tracker_alloc(net, &w->ns_tracker, gfp_flags);
w->iter = iter;
if (addr)
w->addr = *addr;
schedule_work(&w->work);
return;
}
module_put(THIS_MODULE);
err_module:
put_net(net);
}
static int device_cmp(struct nf_conn *i, void *arg)
{
const struct nf_conn_nat *nat = nfct_nat(i);
const struct masq_dev_work *w = arg;
if (!nat)
return 0;
return nat->masq_index == w->ifindex;
}
static int masq_device_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
if (event == NETDEV_DOWN) {
/* Device was downed. Search entire table for
* conntracks which were associated with that device,
* and forget them.
*/
nf_nat_masq_schedule(net, NULL, dev->ifindex,
device_cmp, GFP_KERNEL);
}
return NOTIFY_DONE;
}
static int inet_cmp(struct nf_conn *ct, void *ptr)
{
struct nf_conntrack_tuple *tuple;
struct masq_dev_work *w = ptr;
if (!device_cmp(ct, ptr))
return 0;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
}
static int masq_inet_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
const struct in_ifaddr *ifa = ptr;
const struct in_device *idev;
const struct net_device *dev;
union nf_inet_addr addr;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
/* The masq_dev_notifier will catch the case of the device going
* down. So if the inetdev is dead and being destroyed we have
* no work to do. Otherwise this is an individual address removal
* and we have to perform the flush.
*/
idev = ifa->ifa_dev;
if (idev->dead)
return NOTIFY_DONE;
memset(&addr, 0, sizeof(addr));
addr.ip = ifa->ifa_address;
dev = idev->dev;
nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
inet_cmp, GFP_KERNEL);
return NOTIFY_DONE;
}
static struct notifier_block masq_dev_notifier = {
.notifier_call = masq_device_event,
};
static struct notifier_block masq_inet_notifier = {
.notifier_call = masq_inet_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr)
{
#ifdef CONFIG_IPV6_MODULE
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (!v6_ops)
return -EHOSTUNREACH;
return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#else
return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#endif
}
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out)
{
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
struct in6_addr src;
struct nf_conn *ct;
struct nf_nat_range2 newrange;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED_REPLY)));
if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out,
&ipv6_hdr(skb)->daddr, 0, &src) < 0)
return NF_DROP;
nat = nf_ct_nat_ext_add(ct);
if (nat)
nat->masq_index = out->ifindex;
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr.in6 = src;
newrange.max_addr.in6 = src;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
*
* Defer it to the system workqueue.
*
 * As we can have 'a lot' of inet_events (depending on the number of IPv6
 * addresses being deleted), we also need to limit the work item queue.
*/
static int masq_inet6_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = ptr;
const struct net_device *dev;
union nf_inet_addr addr;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
dev = ifa->idev->dev;
memset(&addr, 0, sizeof(addr));
addr.in6 = ifa->addr;
nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
GFP_ATOMIC);
return NOTIFY_DONE;
}
static struct notifier_block masq_inet6_notifier = {
.notifier_call = masq_inet6_event,
};
static int nf_nat_masquerade_ipv6_register_notifier(void)
{
return register_inet6addr_notifier(&masq_inet6_notifier);
}
#else
static inline int nf_nat_masquerade_ipv6_register_notifier(void) { return 0; }
#endif
int nf_nat_masquerade_inet_register_notifiers(void)
{
int ret = 0;
mutex_lock(&masq_mutex);
if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) {
ret = -EOVERFLOW;
goto out_unlock;
}
/* check if the notifier was already set */
if (++masq_refcnt > 1)
goto out_unlock;
/* Register for device down reports */
ret = register_netdevice_notifier(&masq_dev_notifier);
if (ret)
goto err_dec;
/* Register IP address change reports */
ret = register_inetaddr_notifier(&masq_inet_notifier);
if (ret)
goto err_unregister;
ret = nf_nat_masquerade_ipv6_register_notifier();
if (ret)
goto err_unreg_inet;
mutex_unlock(&masq_mutex);
return ret;
err_unreg_inet:
unregister_inetaddr_notifier(&masq_inet_notifier);
err_unregister:
unregister_netdevice_notifier(&masq_dev_notifier);
err_dec:
masq_refcnt--;
out_unlock:
mutex_unlock(&masq_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers);
void nf_nat_masquerade_inet_unregister_notifiers(void)
{
mutex_lock(&masq_mutex);
/* check if the notifiers still have clients */
if (--masq_refcnt > 0)
goto out_unlock;
unregister_netdevice_notifier(&masq_dev_notifier);
unregister_inetaddr_notifier(&masq_inet_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&masq_inet6_notifier);
#endif
out_unlock:
mutex_unlock(&masq_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers);
| linux-master | net/netfilter/nf_nat_masquerade.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Unstable NAT Helpers for XDP and TC-BPF hook
*
* These are called from the XDP and SCHED_CLS BPF programs. Note that it is
 * allowed to break compatibility for these functions since the interface
 * through which they are exposed to BPF programs is explicitly unstable.
*/
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in nf_nat BTF");
/* bpf_ct_set_nat_info - Set source or destination NAT address
 *
 * Set the source or destination NAT address of a newly allocated
 * nf_conn before insertion. This must be invoked on a referenced
 * PTR_TO_BTF_ID to nf_conn___init.
 *
 * Parameters:
 * @nfct - Pointer to referenced nf_conn object, obtained using
 * bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
 * @addr - NAT source/destination address
 * @port - NAT source/destination port. Non-positive values are
 * interpreted as a request to select a random port.
 * @manip - NF_NAT_MANIP_SRC or NF_NAT_MANIP_DST
 */
__bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
union nf_inet_addr *addr, int port,
enum nf_nat_manip_type manip)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
u16 proto = nf_ct_l3num(ct);
struct nf_nat_range2 range;
if (proto != NFPROTO_IPV4 && proto != NFPROTO_IPV6)
return -EINVAL;
memset(&range, 0, sizeof(struct nf_nat_range2));
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = *addr;
range.max_addr = range.min_addr;
if (port > 0) {
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
range.min_proto.all = cpu_to_be16(port);
range.max_proto.all = range.min_proto.all;
}
return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
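/* Usage sketch, assuming an allocated-but-not-yet-inserted entry "ct_i" as
 * in the conntrack kfunc examples earlier; a non-positive port picks a
 * random one:
 *
 *	union nf_inet_addr saddr = { .ip = bpf_htonl(0x0a000001) };	// 10.0.0.1
 *
 *	bpf_ct_set_nat_info(ct_i, &saddr, -1, NF_NAT_MANIP_SRC);
 *	ct = bpf_ct_insert_entry(ct_i);
 */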
__diag_pop()
BTF_SET8_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
BTF_SET8_END(nf_nat_kfunc_set)
static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
.owner = THIS_MODULE,
.set = &nf_nat_kfunc_set,
};
int register_nf_nat_bpf(void)
{
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
&nf_bpf_nat_kfunc_set);
if (ret)
return ret;
return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
&nf_bpf_nat_kfunc_set);
}
| linux-master | net/netfilter/nf_nat_bpf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xt_connmark - Netfilter module to operate on connection marks
*
* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
* by Henrik Nordstrom <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
* Jan Engelhardt <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connmark.h>
MODULE_AUTHOR("Henrik Nordstrom <[email protected]>");
MODULE_DESCRIPTION("Xtables: connection mark operations");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_CONNMARK");
MODULE_ALIAS("ip6t_CONNMARK");
MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");
static unsigned int
connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
{
enum ip_conntrack_info ctinfo;
u_int32_t new_targetmark;
struct nf_conn *ct;
u_int32_t newmark;
u_int32_t oldmark;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return XT_CONTINUE;
switch (info->mode) {
case XT_CONNMARK_SET:
oldmark = READ_ONCE(ct->mark);
newmark = (oldmark & ~info->ctmask) ^ info->ctmark;
if (info->shift_dir == D_SHIFT_RIGHT)
newmark >>= info->shift_bits;
else
newmark <<= info->shift_bits;
if (READ_ONCE(ct->mark) != newmark) {
WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
case XT_CONNMARK_SAVE:
new_targetmark = (skb->mark & info->nfmask);
if (info->shift_dir == D_SHIFT_RIGHT)
new_targetmark >>= info->shift_bits;
else
new_targetmark <<= info->shift_bits;
newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^
new_targetmark;
if (READ_ONCE(ct->mark) != newmark) {
WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
case XT_CONNMARK_RESTORE:
new_targetmark = (READ_ONCE(ct->mark) & info->ctmask);
if (info->shift_dir == D_SHIFT_RIGHT)
new_targetmark >>= info->shift_bits;
else
new_targetmark <<= info->shift_bits;
newmark = (skb->mark & ~info->nfmask) ^
new_targetmark;
skb->mark = newmark;
break;
}
return XT_CONTINUE;
}
static unsigned int
connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connmark_tginfo1 *info = par->targinfo;
const struct xt_connmark_tginfo2 info2 = {
.ctmark = info->ctmark,
.ctmask = info->ctmask,
.nfmask = info->nfmask,
.mode = info->mode,
};
return connmark_tg_shift(skb, &info2);
}
static unsigned int
connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connmark_tginfo2 *info = par->targinfo;
return connmark_tg_shift(skb, info);
}
static int connmark_tg_check(const struct xt_tgchk_param *par)
{
int ret;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static bool
connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_connmark_mtinfo1 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return false;
return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert;
}
static int connmark_mt_check(const struct xt_mtchk_param *par)
{
int ret;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_target connmark_tg_reg[] __read_mostly = {
{
.name = "CONNMARK",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = connmark_tg_check,
.target = connmark_tg,
.targetsize = sizeof(struct xt_connmark_tginfo1),
.destroy = connmark_tg_destroy,
.me = THIS_MODULE,
},
{
.name = "CONNMARK",
.revision = 2,
.family = NFPROTO_UNSPEC,
.checkentry = connmark_tg_check,
.target = connmark_tg_v2,
.targetsize = sizeof(struct xt_connmark_tginfo2),
.destroy = connmark_tg_destroy,
.me = THIS_MODULE,
}
};
static struct xt_match connmark_mt_reg __read_mostly = {
.name = "connmark",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = connmark_mt_check,
.match = connmark_mt,
.matchsize = sizeof(struct xt_connmark_mtinfo1),
.destroy = connmark_mt_destroy,
.me = THIS_MODULE,
};
static int __init connmark_mt_init(void)
{
int ret;
ret = xt_register_targets(connmark_tg_reg,
ARRAY_SIZE(connmark_tg_reg));
if (ret < 0)
return ret;
ret = xt_register_match(&connmark_mt_reg);
if (ret < 0) {
xt_unregister_targets(connmark_tg_reg,
ARRAY_SIZE(connmark_tg_reg));
return ret;
}
return 0;
}
static void __exit connmark_mt_exit(void)
{
xt_unregister_match(&connmark_mt_reg);
xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
}
module_init(connmark_mt_init);
module_exit(connmark_mt_exit);
| linux-master | net/netfilter/xt_connmark.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
static void nft_fib_inet_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_fib *priv = nft_expr_priv(expr);
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
return nft_fib4_eval(expr, regs, pkt);
case NFT_FIB_RESULT_ADDRTYPE:
return nft_fib4_eval_type(expr, regs, pkt);
}
break;
case NFPROTO_IPV6:
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
return nft_fib6_eval(expr, regs, pkt);
case NFT_FIB_RESULT_ADDRTYPE:
return nft_fib6_eval_type(expr, regs, pkt);
}
break;
}
regs->verdict.code = NF_DROP;
}
static struct nft_expr_type nft_fib_inet_type;
static const struct nft_expr_ops nft_fib_inet_ops = {
.type = &nft_fib_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fib)),
.eval = nft_fib_inet_eval,
.init = nft_fib_init,
.dump = nft_fib_dump,
.validate = nft_fib_validate,
.reduce = nft_fib_reduce,
};
static struct nft_expr_type nft_fib_inet_type __read_mostly = {
.family = NFPROTO_INET,
.name = "fib",
.ops = &nft_fib_inet_ops,
.policy = nft_fib_policy,
.maxattr = NFTA_FIB_MAX,
.owner = THIS_MODULE,
};
static int __init nft_fib_inet_module_init(void)
{
return nft_register_expr(&nft_fib_inet_type);
}
static void __exit nft_fib_inet_module_exit(void)
{
nft_unregister_expr(&nft_fib_inet_type);
}
module_init(nft_fib_inet_module_init);
module_exit(nft_fib_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
MODULE_DESCRIPTION("nftables fib inet support");
| linux-master | net/netfilter/nft_fib_inet.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2021 Red Hat GmbH
*
* Author: Florian Westphal <[email protected]>
*/
#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_hook.h>
#include <net/netfilter/nf_tables.h>
#include <net/sock.h>
static const struct nla_policy nfnl_hook_nla_policy[NFNLA_HOOK_MAX + 1] = {
[NFNLA_HOOK_HOOKNUM] = { .type = NLA_U32 },
[NFNLA_HOOK_PRIORITY] = { .type = NLA_U32 },
[NFNLA_HOOK_DEV] = { .type = NLA_STRING,
.len = IFNAMSIZ - 1 },
[NFNLA_HOOK_FUNCTION_NAME] = { .type = NLA_NUL_STRING,
.len = KSYM_NAME_LEN, },
[NFNLA_HOOK_MODULE_NAME] = { .type = NLA_NUL_STRING,
.len = MODULE_NAME_LEN, },
[NFNLA_HOOK_CHAIN_INFO] = { .type = NLA_NESTED, },
};
static int nf_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *c)
{
int err;
if (!try_module_get(THIS_MODULE))
return -EINVAL;
rcu_read_unlock();
err = netlink_dump_start(nlsk, skb, nlh, c);
rcu_read_lock();
module_put(THIS_MODULE);
return err;
}
struct nfnl_dump_hook_data {
char devname[IFNAMSIZ];
unsigned long headv;
u8 hook;
};
static struct nlattr *nfnl_start_info_type(struct sk_buff *nlskb, enum nfnl_hook_chaintype t)
{
struct nlattr *nest = nla_nest_start(nlskb, NFNLA_HOOK_CHAIN_INFO);
int ret;
if (!nest)
return NULL;
ret = nla_put_be32(nlskb, NFNLA_HOOK_INFO_TYPE, htonl(t));
if (ret == 0)
return nest;
nla_nest_cancel(nlskb, nest);
return NULL;
}
static int nfnl_hook_put_bpf_prog_info(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
unsigned int seq,
const struct bpf_prog *prog)
{
struct nlattr *nest, *nest2;
int ret;
if (!IS_ENABLED(CONFIG_NETFILTER_BPF_LINK))
return 0;
if (WARN_ON_ONCE(!prog))
return 0;
nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_BPF);
if (!nest)
return -EMSGSIZE;
nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
if (!nest2)
goto cancel_nest;
ret = nla_put_be32(nlskb, NFNLA_HOOK_BPF_ID, htonl(prog->aux->id));
if (ret)
goto cancel_nest;
nla_nest_end(nlskb, nest2);
nla_nest_end(nlskb, nest);
return 0;
cancel_nest:
nla_nest_cancel(nlskb, nest);
return -EMSGSIZE;
}
static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
unsigned int seq,
struct nft_chain *chain)
{
struct net *net = sock_net(nlskb->sk);
struct nlattr *nest, *nest2;
int ret = 0;
if (WARN_ON_ONCE(!chain))
return 0;
if (!nft_is_active(net, chain))
return 0;
nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_NFTABLES);
if (!nest)
return -EMSGSIZE;
nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
if (!nest2)
goto cancel_nest;
ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
if (ret)
goto cancel_nest;
ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
if (ret)
goto cancel_nest;
ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
if (ret)
goto cancel_nest;
nla_nest_end(nlskb, nest2);
nla_nest_end(nlskb, nest);
return ret;
cancel_nest:
nla_nest_cancel(nlskb, nest);
return -EMSGSIZE;
}
static int nfnl_hook_dump_one(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
const struct nf_hook_ops *ops,
int family, unsigned int seq)
{
u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
unsigned int portid = NETLINK_CB(nlskb).portid;
struct nlmsghdr *nlh;
int ret = -EMSGSIZE;
u32 hooknum;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
char *module_name;
#endif
nlh = nfnl_msg_put(nlskb, portid, seq, event,
NLM_F_MULTI, family, NFNETLINK_V0, 0);
if (!nlh)
goto nla_put_failure;
#ifdef CONFIG_KALLSYMS
ret = snprintf(sym, sizeof(sym), "%ps", ops->hook);
if (ret >= sizeof(sym)) {
ret = -EINVAL;
goto nla_put_failure;
}
module_name = strstr(sym, " [");
if (module_name) {
char *end;
*module_name = '\0';
module_name += 2;
end = strchr(module_name, ']');
if (end) {
*end = 0;
ret = nla_put_string(nlskb, NFNLA_HOOK_MODULE_NAME, module_name);
if (ret)
goto nla_put_failure;
}
}
ret = nla_put_string(nlskb, NFNLA_HOOK_FUNCTION_NAME, sym);
if (ret)
goto nla_put_failure;
#endif
if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
hooknum = NF_NETDEV_INGRESS;
else
hooknum = ops->hooknum;
ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
if (ret)
goto nla_put_failure;
ret = nla_put_be32(nlskb, NFNLA_HOOK_PRIORITY, htonl(ops->priority));
if (ret)
goto nla_put_failure;
switch (ops->hook_ops_type) {
case NF_HOOK_OP_NF_TABLES:
ret = nfnl_hook_put_nft_chain_info(nlskb, ctx, seq, ops->priv);
break;
case NF_HOOK_OP_BPF:
ret = nfnl_hook_put_bpf_prog_info(nlskb, ctx, seq, ops->priv);
break;
case NF_HOOK_OP_UNDEFINED:
break;
default:
WARN_ON_ONCE(1);
break;
}
if (ret)
goto nla_put_failure;
nlmsg_end(nlskb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(nlskb, nlh);
return ret;
}
static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
const struct nf_hook_entries *hook_head = NULL;
#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
struct net_device *netdev;
#endif
switch (pf) {
case NFPROTO_IPV4:
if (hook >= ARRAY_SIZE(net->nf.hooks_ipv4))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
break;
case NFPROTO_IPV6:
if (hook >= ARRAY_SIZE(net->nf.hooks_ipv6))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
if (hook >= ARRAY_SIZE(net->nf.hooks_arp))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;
case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
if (hook >= ARRAY_SIZE(net->nf.hooks_bridge))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
case NFPROTO_NETDEV:
if (hook >= NF_NETDEV_NUMHOOKS)
return ERR_PTR(-EOPNOTSUPP);
if (!dev)
return ERR_PTR(-ENODEV);
netdev = dev_get_by_name_rcu(net, dev);
if (!netdev)
return ERR_PTR(-ENODEV);
#ifdef CONFIG_NETFILTER_INGRESS
if (hook == NF_NETDEV_INGRESS)
return rcu_dereference(netdev->nf_hooks_ingress);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
if (hook == NF_NETDEV_EGRESS)
return rcu_dereference(netdev->nf_hooks_egress);
#endif
fallthrough;
#endif
default:
return ERR_PTR(-EPROTONOSUPPORT);
}
return hook_head;
}
static int nfnl_hook_dump(struct sk_buff *nlskb,
struct netlink_callback *cb)
{
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nfnl_dump_hook_data *ctx = cb->data;
int err, family = nfmsg->nfgen_family;
struct net *net = sock_net(nlskb->sk);
struct nf_hook_ops * const *ops;
const struct nf_hook_entries *e;
unsigned int i = cb->args[0];
rcu_read_lock();
e = nfnl_hook_entries_head(family, ctx->hook, net, ctx->devname);
if (!e)
goto done;
if (IS_ERR(e)) {
cb->seq++;
goto done;
}
if ((unsigned long)e != ctx->headv || i >= e->num_hook_entries)
cb->seq++;
ops = nf_hook_entries_get_hook_ops(e);
for (; i < e->num_hook_entries; i++) {
err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
cb->nlh->nlmsg_seq);
if (err)
break;
}
done:
nl_dump_check_consistent(cb, nlmsg_hdr(nlskb));
rcu_read_unlock();
cb->args[0] = i;
return nlskb->len;
}
static int nfnl_hook_dump_start(struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nlattr * const *nla = cb->data;
struct nfnl_dump_hook_data *ctx = NULL;
struct net *net = sock_net(cb->skb->sk);
u8 family = nfmsg->nfgen_family;
char name[IFNAMSIZ] = "";
const void *head;
u32 hooknum;
hooknum = ntohl(nla_get_be32(nla[NFNLA_HOOK_HOOKNUM]));
if (hooknum > 255)
return -EINVAL;
if (family == NFPROTO_NETDEV) {
if (!nla[NFNLA_HOOK_DEV])
return -EINVAL;
nla_strscpy(name, nla[NFNLA_HOOK_DEV], sizeof(name));
}
rcu_read_lock();
/* Not dereferenced; for consistency check only */
head = nfnl_hook_entries_head(family, hooknum, net, name);
rcu_read_unlock();
if (head && IS_ERR(head))
return PTR_ERR(head);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
strscpy(ctx->devname, name, sizeof(ctx->devname));
ctx->headv = (unsigned long)head;
ctx->hook = hooknum;
cb->seq = 1;
cb->data = ctx;
return 0;
}
static int nfnl_hook_dump_stop(struct netlink_callback *cb)
{
kfree(cb->data);
return 0;
}
static int nfnl_hook_get(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
if (!nla[NFNLA_HOOK_HOOKNUM])
return -EINVAL;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nfnl_hook_dump_start,
.done = nfnl_hook_dump_stop,
.dump = nfnl_hook_dump,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nf_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
return -EOPNOTSUPP;
}
static const struct nfnl_callback nfnl_hook_cb[NFNL_MSG_HOOK_MAX] = {
[NFNL_MSG_HOOK_GET] = {
.call = nfnl_hook_get,
.type = NFNL_CB_RCU,
.attr_count = NFNLA_HOOK_MAX,
.policy = nfnl_hook_nla_policy
},
};
static const struct nfnetlink_subsystem nfhook_subsys = {
.name = "nfhook",
.subsys_id = NFNL_SUBSYS_HOOK,
.cb_count = NFNL_MSG_HOOK_MAX,
.cb = nfnl_hook_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_HOOK);
static int __init nfnetlink_hook_init(void)
{
return nfnetlink_subsys_register(&nfhook_subsys);
}
static void __exit nfnetlink_hook_exit(void)
{
nfnetlink_subsys_unregister(&nfhook_subsys);
}
module_init(nfnetlink_hook_init);
module_exit(nfnetlink_hook_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("nfnetlink_hook: list registered netfilter hooks");
| linux-master | net/netfilter/nfnetlink_hook.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 Laura Garcia Liebana <[email protected]>
* Copyright (c) 2020 Jose M. Guisado <[email protected]>
*/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb)
{
dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol),
eth_hdr(oldskb)->h_source, eth_hdr(oldskb)->h_dest,
nskb->len);
dev_queue_xmit(nskb);
}
static void nft_reject_netdev_send_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct ethhdr *eth = eth_hdr(pkt->skb);
struct nft_reject *priv = nft_expr_priv(expr);
const unsigned char *dest = eth->h_dest;
if (is_broadcast_ether_addr(dest) ||
is_multicast_ether_addr(dest))
goto out;
switch (eth->h_proto) {
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_netdev_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmp_code(priv->icmp_code));
break;
}
break;
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_netdev_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmpv6_code(priv->icmp_code));
break;
}
break;
default:
/* No explicit way to reject this protocol, drop it. */
break;
}
out:
regs->verdict.code = NF_DROP;
}
static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
}
static struct nft_expr_type nft_reject_netdev_type;
static const struct nft_expr_ops nft_reject_netdev_ops = {
.type = &nft_reject_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_netdev_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_netdev_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_reject_netdev_type __read_mostly = {
.family = NFPROTO_NETDEV,
.name = "reject",
.ops = &nft_reject_netdev_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_netdev_module_init(void)
{
return nft_register_expr(&nft_reject_netdev_type);
}
static void __exit nft_reject_netdev_module_exit(void)
{
nft_unregister_expr(&nft_reject_netdev_type);
}
module_init(nft_reject_netdev_module_init);
module_exit(nft_reject_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia Liebana <[email protected]>");
MODULE_AUTHOR("Jose M. Guisado <[email protected]>");
MODULE_DESCRIPTION("Reject packets from netdev via nftables");
MODULE_ALIAS_NFT_AF_EXPR(5, "reject");
| linux-master | net/netfilter/nft_reject_netdev.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
struct nft_flow_rule *flow;
flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
if (!flow)
return NULL;
flow->rule = flow_rule_alloc(num_actions);
if (!flow->rule) {
kfree(flow);
return NULL;
}
flow->rule->match.dissector = &flow->match.dissector;
flow->rule->match.mask = &flow->match.mask;
flow->rule->match.key = &flow->match.key;
return flow;
}
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
enum flow_dissector_key_id addr_type)
{
struct nft_flow_match *match = &flow->match;
struct nft_flow_key *mask = &match->mask;
struct nft_flow_key *key = &match->key;
if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))
return;
key->control.addr_type = addr_type;
mask->control.addr_type = 0xffff;
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
offsetof(struct nft_flow_key, control);
}
struct nft_offload_ethertype {
__be16 value;
__be16 mask;
};
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow)
{
struct nft_flow_match *match = &flow->match;
struct nft_offload_ethertype ethertype = {
.value = match->key.basic.n_proto,
.mask = match->mask.basic.n_proto,
};
if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) &&
(match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
match->mask.vlan.vlan_tpid = ethertype.mask;
match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
offsetof(struct nft_flow_key, cvlan);
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
} else if (match->dissector.used_keys &
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) &&
(match->key.basic.n_proto == htons(ETH_P_8021Q) ||
match->key.basic.n_proto == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.vlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
match->mask.vlan.vlan_tpid = ethertype.mask;
match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
offsetof(struct nft_flow_key, vlan);
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
}
}
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
const struct nft_rule *rule)
{
struct nft_offload_ctx *ctx;
struct nft_flow_rule *flow;
int num_actions = 0, err;
struct nft_expr *expr;
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
if (expr->ops->offload_action &&
expr->ops->offload_action(expr))
num_actions++;
expr = nft_expr_next(expr);
}
if (num_actions == 0)
return ERR_PTR(-EOPNOTSUPP);
flow = nft_flow_rule_alloc(num_actions);
if (!flow)
return ERR_PTR(-ENOMEM);
expr = nft_expr_first(rule);
ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
goto err_out;
}
ctx->net = net;
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
while (nft_expr_more(rule, expr)) {
if (!expr->ops->offload) {
err = -EOPNOTSUPP;
goto err_out;
}
err = expr->ops->offload(ctx, flow, expr);
if (err < 0)
goto err_out;
expr = nft_expr_next(expr);
}
nft_flow_rule_transfer_vlan(ctx, flow);
flow->proto = ctx->dep.l3num;
kfree(ctx);
return flow;
err_out:
kfree(ctx);
nft_flow_rule_destroy(flow);
return ERR_PTR(err);
}
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
struct flow_action_entry *entry;
int i;
flow_action_for_each(i, entry, &flow->rule->action) {
switch (entry->id) {
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED:
dev_put(entry->dev);
break;
default:
break;
}
}
kfree(flow->rule);
kfree(flow);
}
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
enum nft_offload_dep_type type)
{
ctx->dep.type = type;
}
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
const void *data, u32 len)
{
switch (ctx->dep.type) {
case NFT_OFFLOAD_DEP_NETWORK:
WARN_ON(len != sizeof(__u16));
memcpy(&ctx->dep.l3num, data, sizeof(__u16));
break;
case NFT_OFFLOAD_DEP_TRANSPORT:
WARN_ON(len != sizeof(__u8));
memcpy(&ctx->dep.protonum, data, sizeof(__u8));
break;
default:
break;
}
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
__be16 proto, int priority,
struct netlink_ext_ack *extack)
{
common->protocol = proto;
common->prio = priority;
common->extack = extack;
}
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
struct list_head *cb_list)
{
struct flow_block_cb *block_cb;
int err;
list_for_each_entry(block_cb, cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err < 0)
return err;
}
return 0;
}
static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
{
if (basechain->ops.priority <= 0 ||
basechain->ops.priority > USHRT_MAX)
return -1;
return 0;
}
bool nft_chain_offload_support(const struct nft_base_chain *basechain)
{
struct net_device *dev;
struct nft_hook *hook;
if (nft_chain_offload_priority(basechain) < 0)
return false;
list_for_each_entry(hook, &basechain->hook_list, list) {
if (hook->ops.pf != NFPROTO_NETDEV ||
hook->ops.hooknum != NF_NETDEV_INGRESS)
return false;
dev = hook->ops.dev;
if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists())
return false;
}
return true;
}
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
const struct nft_base_chain *basechain,
const struct nft_rule *rule,
const struct nft_flow_rule *flow,
struct netlink_ext_ack *extack,
enum flow_cls_command command)
{
__be16 proto = ETH_P_ALL;
memset(cls_flow, 0, sizeof(*cls_flow));
if (flow)
proto = flow->proto;
nft_flow_offload_common_init(&cls_flow->common, proto,
basechain->ops.priority, extack);
cls_flow->command = command;
cls_flow->cookie = (unsigned long) rule;
if (flow)
cls_flow->rule = flow->rule;
}
static int nft_flow_offload_cmd(const struct nft_chain *chain,
const struct nft_rule *rule,
struct nft_flow_rule *flow,
enum flow_cls_command command,
struct flow_cls_offload *cls_flow)
{
struct netlink_ext_ack extack = {};
struct nft_base_chain *basechain;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
command);
return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
&basechain->flow_block.cb_list);
}
static int nft_flow_offload_rule(const struct nft_chain *chain,
struct nft_rule *rule,
struct nft_flow_rule *flow,
enum flow_cls_command command)
{
struct flow_cls_offload cls_flow;
return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}
int nft_flow_rule_stats(const struct nft_chain *chain,
const struct nft_rule *rule)
{
struct flow_cls_offload cls_flow = {};
struct nft_expr *expr, *next;
int err;
err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
&cls_flow);
if (err < 0)
return err;
nft_rule_for_each_expr(expr, next, rule) {
if (expr->ops->offload_stats)
expr->ops->offload_stats(expr, &cls_flow.stats);
}
return 0;
}
static int nft_flow_offload_bind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
return 0;
}
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
struct flow_block_cb *block_cb, *next;
struct flow_cls_offload cls_flow;
struct netlink_ext_ack extack;
struct nft_chain *chain;
struct nft_rule *rule;
chain = &basechain->chain;
list_for_each_entry(rule, &chain->rules, list) {
memset(&extack, 0, sizeof(extack));
nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
&extack, FLOW_CLS_DESTROY);
nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
}
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
list_del(&block_cb->list);
flow_block_cb_free(block_cb);
}
return 0;
}
static int nft_block_setup(struct nft_base_chain *basechain,
struct flow_block_offload *bo,
enum flow_block_command cmd)
{
int err;
switch (cmd) {
case FLOW_BLOCK_BIND:
err = nft_flow_offload_bind(bo, basechain);
break;
case FLOW_BLOCK_UNBIND:
err = nft_flow_offload_unbind(bo, basechain);
break;
default:
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
return err;
}
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
struct net *net,
enum flow_block_command cmd,
struct nft_base_chain *basechain,
struct netlink_ext_ack *extack)
{
memset(bo, 0, sizeof(*bo));
bo->net = net;
bo->block = &basechain->flow_block;
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
bo->cb_list_head = &basechain->flow_block.cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}
static int nft_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
if (err < 0)
return err;
return nft_block_setup(chain, &bo, cmd);
}
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
struct nft_base_chain *basechain = block_cb->indr.data;
struct net_device *dev = block_cb->indr.dev;
struct netlink_ext_ack extack = {};
struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct flow_block_offload bo;
nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
basechain, &extack);
nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
nft_flow_offload_unbind(&bo, basechain);
mutex_unlock(&nft_net->commit_mutex);
}
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
nft_indr_block_cleanup);
if (err < 0)
return err;
if (list_empty(&bo.cb_list))
return -EOPNOTSUPP;
return nft_block_setup(basechain, &bo, cmd);
}
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
struct net_device *dev,
enum flow_block_command cmd)
{
int err;
if (dev->netdev_ops->ndo_setup_tc)
err = nft_block_offload_cmd(basechain, dev, cmd);
else
err = nft_indr_block_offload_cmd(basechain, dev, cmd);
return err;
}
static int nft_flow_block_chain(struct nft_base_chain *basechain,
const struct net_device *this_dev,
enum flow_block_command cmd)
{
struct net_device *dev;
struct nft_hook *hook;
int err, i = 0;
list_for_each_entry(hook, &basechain->hook_list, list) {
dev = hook->ops.dev;
if (this_dev && this_dev != dev)
continue;
err = nft_chain_offload_cmd(basechain, dev, cmd);
if (err < 0 && cmd == FLOW_BLOCK_BIND) {
if (!this_dev)
goto err_flow_block;
return err;
}
i++;
}
return 0;
err_flow_block:
list_for_each_entry(hook, &basechain->hook_list, list) {
if (i-- <= 0)
break;
dev = hook->ops.dev;
nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
}
return err;
}
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
enum flow_block_command cmd)
{
struct nft_base_chain *basechain;
u8 policy;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
policy = ppolicy ? *ppolicy : basechain->policy;
/* Only a default policy of accept is supported for now. */
if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
return -EOPNOTSUPP;
return nft_flow_block_chain(basechain, NULL, cmd);
}
static void nft_flow_rule_offload_abort(struct net *net,
struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
int err = 0;
list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
err = nft_flow_offload_chain(trans->ctx.chain, NULL,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_DELCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_chain(trans->ctx.chain, NULL,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
}
if (WARN_ON_ONCE(err))
break;
}
}
int nft_flow_rule_offload_commit(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans;
int err = 0;
u8 policy;
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
policy = nft_trans_chain_policy(trans);
err = nft_flow_offload_chain(trans->ctx.chain, &policy,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_DELCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
policy = nft_trans_chain_policy(trans);
err = nft_flow_offload_chain(trans->ctx.chain, &policy,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_NEWRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
if (trans->ctx.flags & NLM_F_REPLACE ||
!(trans->ctx.flags & NLM_F_APPEND)) {
err = -EOPNOTSUPP;
break;
}
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
}
if (err) {
nft_flow_rule_offload_abort(net, trans);
break;
}
}
return err;
}
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
struct net_device *dev)
{
struct nft_base_chain *basechain;
struct nft_hook *hook, *found;
const struct nft_table *table;
struct nft_chain *chain;
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain) ||
!(chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
found = NULL;
basechain = nft_base_chain(chain);
list_for_each_entry(hook, &basechain->hook_list, list) {
if (hook->ops.dev != dev)
continue;
found = hook;
break;
}
if (!found)
continue;
return chain;
}
}
return NULL;
}
static int nft_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct nft_chain *chain;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
chain = __nft_offload_get_chain(nft_net, dev);
if (chain)
nft_flow_block_chain(nft_base_chain(chain), dev,
FLOW_BLOCK_UNBIND);
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nft_offload_netdev_notifier = {
.notifier_call = nft_offload_netdev_event,
};
int nft_offload_init(void)
{
return register_netdevice_notifier(&nft_offload_netdev_notifier);
}
void nft_offload_exit(void)
{
unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
| linux-master | net/netfilter/nf_tables_offload.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
static int nf_flow_state_check(struct flow_offload *flow, int proto,
struct sk_buff *skb, unsigned int thoff)
{
struct tcphdr *tcph;
if (proto != IPPROTO_TCP)
return 0;
tcph = (void *)(skb_network_header(skb) + thoff);
if (unlikely(tcph->fin || tcph->rst)) {
flow_offload_teardown(flow);
return -1;
}
return 0;
}
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
__be32 addr, __be32 new_addr)
{
struct tcphdr *tcph;
tcph = (void *)(skb_network_header(skb) + thoff);
inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
__be32 addr, __be32 new_addr)
{
struct udphdr *udph;
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&udph->check, skb, addr,
new_addr, true);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
}
static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
unsigned int thoff, __be32 addr,
__be32 new_addr)
{
switch (iph->protocol) {
case IPPROTO_TCP:
nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
break;
case IPPROTO_UDP:
nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
break;
}
}
static void nf_flow_snat_ip(const struct flow_offload *flow,
struct sk_buff *skb, struct iphdr *iph,
unsigned int thoff, enum flow_offload_tuple_dir dir)
{
__be32 addr, new_addr;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = iph->saddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
iph->saddr = new_addr;
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = iph->daddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
iph->daddr = new_addr;
break;
}
csum_replace4(&iph->check, addr, new_addr);
nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
static void nf_flow_dnat_ip(const struct flow_offload *flow,
struct sk_buff *skb, struct iphdr *iph,
unsigned int thoff, enum flow_offload_tuple_dir dir)
{
__be32 addr, new_addr;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = iph->daddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
iph->daddr = new_addr;
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = iph->saddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
iph->saddr = new_addr;
break;
}
csum_replace4(&iph->check, addr, new_addr);
nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
unsigned int thoff, enum flow_offload_tuple_dir dir,
struct iphdr *iph)
{
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
nf_flow_snat_ip(flow, skb, iph, thoff, dir);
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
}
}
static bool ip_has_options(unsigned int thoff)
{
return thoff != sizeof(struct iphdr);
}
static void nf_flow_tuple_encap(struct sk_buff *skb,
struct flow_offload_tuple *tuple)
{
struct vlan_ethhdr *veth;
struct pppoe_hdr *phdr;
int i = 0;
if (skb_vlan_tag_present(skb)) {
tuple->encap[i].id = skb_vlan_tag_get(skb);
tuple->encap[i].proto = skb->vlan_proto;
i++;
}
switch (skb->protocol) {
case htons(ETH_P_8021Q):
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
tuple->encap[i].proto = skb->protocol;
break;
case htons(ETH_P_PPP_SES):
phdr = (struct pppoe_hdr *)skb_mac_header(skb);
tuple->encap[i].id = ntohs(phdr->sid);
tuple->encap[i].proto = skb->protocol;
break;
}
}
struct nf_flowtable_ctx {
const struct net_device *in;
u32 offset;
u32 hdrsize;
};
static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
struct flow_offload_tuple *tuple)
{
struct flow_ports *ports;
unsigned int thoff;
struct iphdr *iph;
u8 ipproto;
if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
return -1;
iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
thoff = (iph->ihl * 4);
if (ip_is_fragment(iph) ||
unlikely(ip_has_options(thoff)))
return -1;
thoff += ctx->offset;
ipproto = iph->protocol;
switch (ipproto) {
case IPPROTO_TCP:
ctx->hdrsize = sizeof(struct tcphdr);
break;
case IPPROTO_UDP:
ctx->hdrsize = sizeof(struct udphdr);
break;
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
ctx->hdrsize = sizeof(struct gre_base_hdr);
break;
#endif
default:
return -1;
}
if (iph->ttl <= 1)
return -1;
if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
return -1;
switch (ipproto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
tuple->src_port = ports->source;
tuple->dst_port = ports->dest;
break;
case IPPROTO_GRE: {
struct gre_base_hdr *greh;
greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
return -1;
break;
}
}
iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
tuple->src_v4.s_addr = iph->saddr;
tuple->dst_v4.s_addr = iph->daddr;
tuple->l3proto = AF_INET;
tuple->l4proto = ipproto;
tuple->iifidx = ctx->in->ifindex;
nf_flow_tuple_encap(skb, tuple);
return 0;
}
/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
return false;
return true;
}
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
return true;
return dst_check(tuple->dst_cache, tuple->dst_cookie);
}
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
const struct nf_hook_state *state,
struct dst_entry *dst)
{
skb_orphan(skb);
skb_dst_set_noref(skb, dst);
dst_output(state->net, state->sk, skb);
return NF_STOLEN;
}
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
u32 *offset)
{
struct vlan_ethhdr *veth;
switch (skb->protocol) {
case htons(ETH_P_8021Q):
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
if (veth->h_vlan_encapsulated_proto == proto) {
*offset += VLAN_HLEN;
return true;
}
break;
case htons(ETH_P_PPP_SES):
if (nf_flow_pppoe_proto(skb) == proto) {
*offset += PPPOE_SES_HLEN;
return true;
}
break;
}
return false;
}
static void nf_flow_encap_pop(struct sk_buff *skb,
struct flow_offload_tuple_rhash *tuplehash)
{
struct vlan_hdr *vlan_hdr;
int i;
for (i = 0; i < tuplehash->tuple.encap_num; i++) {
if (skb_vlan_tag_present(skb)) {
__vlan_hwaccel_clear_tag(skb);
continue;
}
switch (skb->protocol) {
case htons(ETH_P_8021Q):
vlan_hdr = (struct vlan_hdr *)skb->data;
__skb_pull(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vlan_hdr);
skb_reset_network_header(skb);
break;
case htons(ETH_P_PPP_SES):
skb->protocol = nf_flow_pppoe_proto(skb);
skb_pull(skb, PPPOE_SES_HLEN);
skb_reset_network_header(skb);
break;
}
}
}
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
const struct flow_offload_tuple_rhash *tuplehash,
unsigned short type)
{
struct net_device *outdev;
outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
if (!outdev)
return NF_DROP;
skb->dev = outdev;
dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
tuplehash->tuple.out.h_source, skb->len);
dev_queue_xmit(skb);
return NF_STOLEN;
}
static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
struct nf_flowtable *flow_table, struct sk_buff *skb)
{
struct flow_offload_tuple tuple = {};
if (skb->protocol != htons(ETH_P_IP) &&
!nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
return NULL;
if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
return NULL;
return flow_offload_lookup(flow_table, &tuple);
}
static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
struct nf_flowtable *flow_table,
struct flow_offload_tuple_rhash *tuplehash,
struct sk_buff *skb)
{
enum flow_offload_tuple_dir dir;
struct flow_offload *flow;
unsigned int thoff, mtu;
struct iphdr *iph;
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return 0;
iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
thoff = (iph->ihl * 4) + ctx->offset;
if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
return 0;
if (!nf_flow_dst_check(&tuplehash->tuple)) {
flow_offload_teardown(flow);
return 0;
}
if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
return -1;
flow_offload_refresh(flow_table, flow, false);
nf_flow_encap_pop(skb, tuplehash);
thoff -= ctx->offset;
iph = ip_hdr(skb);
nf_flow_nat_ip(flow, skb, thoff, dir, iph);
ip_decrease_ttl(iph);
skb_clear_tstamp(skb);
if (flow_table->flags & NF_FLOWTABLE_COUNTER)
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
return 1;
}
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct flow_offload_tuple_rhash *tuplehash;
struct nf_flowtable *flow_table = priv;
enum flow_offload_tuple_dir dir;
struct nf_flowtable_ctx ctx = {
.in = state->in,
};
struct flow_offload *flow;
struct net_device *outdev;
struct rtable *rt;
__be32 nexthop;
int ret;
tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
if (!tuplehash)
return NF_ACCEPT;
ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
if (ret < 0)
return NF_DROP;
else if (ret == 0)
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
rt = (struct rtable *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
return nf_flow_xmit_xfrm(skb, state, &rt->dst);
}
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
rt = (struct rtable *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
skb_dst_set_noref(skb, &rt->dst);
neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
ret = NF_STOLEN;
break;
case FLOW_OFFLOAD_XMIT_DIRECT:
ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
if (ret == NF_DROP)
flow_offload_teardown(flow);
break;
default:
WARN_ON_ONCE(1);
ret = NF_DROP;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
struct in6_addr *addr,
struct in6_addr *new_addr,
struct ipv6hdr *ip6h)
{
struct tcphdr *tcph;
tcph = (void *)(skb_network_header(skb) + thoff);
inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
new_addr->s6_addr32, true);
}
static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
struct in6_addr *addr,
struct in6_addr *new_addr)
{
struct udphdr *udph;
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
new_addr->s6_addr32, true);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
}
static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
unsigned int thoff, struct in6_addr *addr,
struct in6_addr *new_addr)
{
switch (ip6h->nexthdr) {
case IPPROTO_TCP:
nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
break;
case IPPROTO_UDP:
nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
break;
}
}
static void nf_flow_snat_ipv6(const struct flow_offload *flow,
struct sk_buff *skb, struct ipv6hdr *ip6h,
unsigned int thoff,
enum flow_offload_tuple_dir dir)
{
struct in6_addr addr, new_addr;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = ip6h->saddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
ip6h->saddr = new_addr;
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = ip6h->daddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
ip6h->daddr = new_addr;
break;
}
nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
struct sk_buff *skb, struct ipv6hdr *ip6h,
unsigned int thoff,
enum flow_offload_tuple_dir dir)
{
struct in6_addr addr, new_addr;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = ip6h->daddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
ip6h->daddr = new_addr;
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = ip6h->saddr;
new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
ip6h->saddr = new_addr;
break;
}
nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
static void nf_flow_nat_ipv6(const struct flow_offload *flow,
struct sk_buff *skb,
enum flow_offload_tuple_dir dir,
struct ipv6hdr *ip6h)
{
unsigned int thoff = sizeof(*ip6h);
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
}
}
static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
struct flow_offload_tuple *tuple)
{
struct flow_ports *ports;
struct ipv6hdr *ip6h;
unsigned int thoff;
u8 nexthdr;
thoff = sizeof(*ip6h) + ctx->offset;
if (!pskb_may_pull(skb, thoff))
return -1;
ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
nexthdr = ip6h->nexthdr;
switch (nexthdr) {
case IPPROTO_TCP:
ctx->hdrsize = sizeof(struct tcphdr);
break;
case IPPROTO_UDP:
ctx->hdrsize = sizeof(struct udphdr);
break;
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
ctx->hdrsize = sizeof(struct gre_base_hdr);
break;
#endif
default:
return -1;
}
if (ip6h->hop_limit <= 1)
return -1;
if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
return -1;
switch (nexthdr) {
case IPPROTO_TCP:
case IPPROTO_UDP:
ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
tuple->src_port = ports->source;
tuple->dst_port = ports->dest;
break;
case IPPROTO_GRE: {
struct gre_base_hdr *greh;
greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
return -1;
break;
}
}
ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
tuple->src_v6 = ip6h->saddr;
tuple->dst_v6 = ip6h->daddr;
tuple->l3proto = AF_INET6;
tuple->l4proto = nexthdr;
tuple->iifidx = ctx->in->ifindex;
nf_flow_tuple_encap(skb, tuple);
return 0;
}
static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
struct nf_flowtable *flow_table,
struct flow_offload_tuple_rhash *tuplehash,
struct sk_buff *skb)
{
enum flow_offload_tuple_dir dir;
struct flow_offload *flow;
unsigned int thoff, mtu;
struct ipv6hdr *ip6h;
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return 0;
ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
thoff = sizeof(*ip6h) + ctx->offset;
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
return 0;
if (!nf_flow_dst_check(&tuplehash->tuple)) {
flow_offload_teardown(flow);
return 0;
}
if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
return -1;
flow_offload_refresh(flow_table, flow, false);
nf_flow_encap_pop(skb, tuplehash);
ip6h = ipv6_hdr(skb);
nf_flow_nat_ipv6(flow, skb, dir, ip6h);
ip6h->hop_limit--;
skb_clear_tstamp(skb);
if (flow_table->flags & NF_FLOWTABLE_COUNTER)
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
return 1;
}
static struct flow_offload_tuple_rhash *
nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx,
struct nf_flowtable *flow_table,
struct sk_buff *skb)
{
struct flow_offload_tuple tuple = {};
if (skb->protocol != htons(ETH_P_IPV6) &&
!nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
return NULL;
if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
return NULL;
return flow_offload_lookup(flow_table, &tuple);
}
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct flow_offload_tuple_rhash *tuplehash;
struct nf_flowtable *flow_table = priv;
enum flow_offload_tuple_dir dir;
struct nf_flowtable_ctx ctx = {
.in = state->in,
};
const struct in6_addr *nexthop;
struct flow_offload *flow;
struct net_device *outdev;
struct rt6_info *rt;
int ret;
tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
if (tuplehash == NULL)
return NF_ACCEPT;
ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
if (ret < 0)
return NF_DROP;
else if (ret == 0)
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
return nf_flow_xmit_xfrm(skb, state, &rt->dst);
}
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
skb_dst_set_noref(skb, &rt->dst);
neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
ret = NF_STOLEN;
break;
case FLOW_OFFLOAD_XMIT_DIRECT:
ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
if (ret == NF_DROP)
flow_offload_teardown(flow);
break;
default:
WARN_ON_ONCE(1);
ret = NF_DROP;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
| linux-master | net/netfilter/nf_flow_table_ip.c |
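The NAT helpers in the file above patch TCP/UDP checksums in place with inet_proto_csum_replace4()/replace16() instead of recomputing them from scratch. As a rough illustration of the underlying arithmetic (an incremental one's-complement update per RFC 1624; this is a minimal userspace sketch, not the kernel's actual implementation):
#include <stdint.h>
/* Replace a 32-bit field 'from' with 'to' in data covered by the one's
 * complement checksum 'check', following RFC 1624: HC' = ~(~HC + ~m + m').
 */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;
	sum += (uint16_t)~(from >> 16);		/* subtract old value */
	sum += (uint16_t)~(from & 0xffff);
	sum += (uint16_t)(to >> 16);		/* add new value */
	sum += (uint16_t)(to & 0xffff);
	while (sum >> 16)			/* fold end-around carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}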
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/netfilter.h>
#include <net/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
s32 off)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_conn_seqadj *seqadj;
struct nf_ct_seqadj *this_way;
if (off == 0)
return 0;
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
seqadj = nfct_seqadj(ct);
this_way = &seqadj->seq[dir];
this_way->offset_before = off;
this_way->offset_after = off;
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_seqadj_init);
int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
__be32 seq, s32 off)
{
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_ct_seqadj *this_way;
if (off == 0)
return 0;
if (unlikely(!seqadj)) {
WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
return 0;
}
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
spin_lock_bh(&ct->lock);
this_way = &seqadj->seq[dir];
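/* Start a new correction only if no adjustment step is pending or this
 * packet lies beyond the previous correction point.
 */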
if (this_way->offset_before == this_way->offset_after ||
before(this_way->correction_pos, ntohl(seq))) {
this_way->correction_pos = ntohl(seq);
this_way->offset_before = this_way->offset_after;
this_way->offset_after += off;
}
spin_unlock_bh(&ct->lock);
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_seqadj_set);
void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
s32 off)
{
const struct tcphdr *th;
if (nf_ct_protonum(ct) != IPPROTO_TCP)
return;
th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
}
EXPORT_SYMBOL_GPL(nf_ct_tcp_seqadj_set);
/* Adjust one found SACK option including checksum correction */
static void nf_ct_sack_block_adjust(struct sk_buff *skb,
struct tcphdr *tcph,
unsigned int sackoff,
unsigned int sackend,
struct nf_ct_seqadj *seq)
{
while (sackoff < sackend) {
struct tcp_sack_block_wire *sack;
__be32 new_start_seq, new_end_seq;
sack = (void *)skb->data + sackoff;
if (after(ntohl(sack->start_seq) - seq->offset_before,
seq->correction_pos))
new_start_seq = htonl(ntohl(sack->start_seq) -
seq->offset_after);
else
new_start_seq = htonl(ntohl(sack->start_seq) -
seq->offset_before);
if (after(ntohl(sack->end_seq) - seq->offset_before,
seq->correction_pos))
new_end_seq = htonl(ntohl(sack->end_seq) -
seq->offset_after);
else
new_end_seq = htonl(ntohl(sack->end_seq) -
seq->offset_before);
pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
ntohl(sack->start_seq), ntohl(new_start_seq),
ntohl(sack->end_seq), ntohl(new_end_seq));
inet_proto_csum_replace4(&tcph->check, skb,
sack->start_seq, new_start_seq, false);
inet_proto_csum_replace4(&tcph->check, skb,
sack->end_seq, new_end_seq, false);
sack->start_seq = new_start_seq;
sack->end_seq = new_end_seq;
sackoff += sizeof(*sack);
}
}
/* TCP SACK sequence number adjustment */
static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
struct tcphdr *tcph = (void *)skb->data + protoff;
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
unsigned int dir, optoff, optend;
optoff = protoff + sizeof(struct tcphdr);
optend = protoff + tcph->doff * 4;
if (skb_ensure_writable(skb, optend))
return 0;
tcph = (void *)skb->data + protoff;
dir = CTINFO2DIR(ctinfo);
while (optoff < optend) {
/* Usually: option, length. */
unsigned char *op = skb->data + optoff;
switch (op[0]) {
case TCPOPT_EOL:
return 1;
case TCPOPT_NOP:
optoff++;
continue;
default:
/* no partial options */
if (optoff + 1 == optend ||
optoff + op[1] > optend ||
op[1] < 2)
return 0;
if (op[0] == TCPOPT_SACK &&
op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
nf_ct_sack_block_adjust(skb, tcph, optoff + 2,
optoff+op[1],
&seqadj->seq[!dir]);
optoff += op[1];
}
}
return 1;
}
/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
int nf_ct_seq_adjust(struct sk_buff *skb,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned int protoff)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct tcphdr *tcph;
__be32 newseq, newack;
s32 seqoff, ackoff;
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
struct nf_ct_seqadj *this_way, *other_way;
int res = 1;
this_way = &seqadj->seq[dir];
other_way = &seqadj->seq[!dir];
if (skb_ensure_writable(skb, protoff + sizeof(*tcph)))
return 0;
tcph = (void *)skb->data + protoff;
spin_lock_bh(&ct->lock);
if (after(ntohl(tcph->seq), this_way->correction_pos))
seqoff = this_way->offset_after;
else
seqoff = this_way->offset_before;
newseq = htonl(ntohl(tcph->seq) + seqoff);
inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
pr_debug("Adjusting sequence number from %u->%u\n",
ntohl(tcph->seq), ntohl(newseq));
tcph->seq = newseq;
if (!tcph->ack)
goto out;
if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
other_way->correction_pos))
ackoff = other_way->offset_after;
else
ackoff = other_way->offset_before;
newack = htonl(ntohl(tcph->ack_seq) - ackoff);
inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
false);
pr_debug("Adjusting ack number from %u->%u, ack from %u->%u\n",
ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
ntohl(newack));
tcph->ack_seq = newack;
res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
out:
spin_unlock_bh(&ct->lock);
return res;
}
EXPORT_SYMBOL_GPL(nf_ct_seq_adjust);
s32 nf_ct_seq_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq)
{
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
struct nf_ct_seqadj *this_way;
if (!seqadj)
return 0;
this_way = &seqadj->seq[dir];
return after(seq, this_way->correction_pos) ?
this_way->offset_after : this_way->offset_before;
}
EXPORT_SYMBOL_GPL(nf_ct_seq_offset);
| linux-master | net/netfilter/nf_conntrack_seqadj.c |
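nf_ct_seq_offset() above encodes the adjustment as a piecewise function of the TCP sequence number. A minimal userspace model of that lookup (ignoring 32-bit sequence wraparound, which the kernel's after() macro handles; the struct here only mirrors the relevant fields):
#include <stdint.h>
struct seqadj {
	uint32_t correction_pos;	/* seq where the payload was resized */
	int32_t offset_before;		/* delta for data up to that point */
	int32_t offset_after;		/* delta for data past it */
};
static int32_t seq_offset(const struct seqadj *s, uint32_t seq)
{
	return seq > s->correction_pos ? s->offset_after : s->offset_before;
}
/* e.g. a helper that grew the payload by 4 bytes at seq 1000 yields
 * offset 0 for seq <= 1000 and +4 for anything after it.
 */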
// SPDX-License-Identifier: GPL-2.0-only
/* iptables module for using new netfilter netlink queue
*
* (C) 2005 by Harald Welte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFQUEUE.h>
#include <net/netfilter/nf_queue.h>
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet forwarding to netlink");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_NFQUEUE");
MODULE_ALIAS("ip6t_NFQUEUE");
MODULE_ALIAS("arpt_NFQUEUE");
static u32 jhash_initval __read_mostly;
static unsigned int
nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info *tinfo = par->targinfo;
return NF_QUEUE_NR(tinfo->queuenum);
}
static unsigned int
nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 queue = info->queuenum;
if (info->queues_total > 1) {
queue = nfqueue_hash(skb, queue, info->queues_total,
xt_family(par), jhash_initval);
}
return NF_QUEUE_NR(queue);
}
static unsigned int
nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v2 *info = par->targinfo;
unsigned int ret = nfqueue_tg_v1(skb, par);
if (info->bypass)
ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
return ret;
}
static int nfqueue_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_NFQ_info_v3 *info = par->targinfo;
u32 maxid;
init_hashrandom(&jhash_initval);
if (info->queues_total == 0) {
pr_info_ratelimited("number of total queues is 0\n");
return -EINVAL;
}
maxid = info->queues_total - 1 + info->queuenum;
if (maxid > 0xffff) {
pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
info->queues_total, maxid);
return -ERANGE;
}
if (par->target->revision == 2 && info->flags > 1)
return -EINVAL;
if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK)
return -EINVAL;
return 0;
}
static unsigned int
nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v3 *info = par->targinfo;
u32 queue = info->queuenum;
int ret;
if (info->queues_total > 1) {
if (info->flags & NFQ_FLAG_CPU_FANOUT) {
int cpu = smp_processor_id();
queue = info->queuenum + cpu % info->queues_total;
} else {
queue = nfqueue_hash(skb, queue, info->queues_total,
xt_family(par), jhash_initval);
}
}
ret = NF_QUEUE_NR(queue);
if (info->flags & NFQ_FLAG_BYPASS)
ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
return ret;
}
static struct xt_target nfqueue_tg_reg[] __read_mostly = {
{
.name = "NFQUEUE",
.family = NFPROTO_UNSPEC,
.target = nfqueue_tg,
.targetsize = sizeof(struct xt_NFQ_info),
.me = THIS_MODULE,
},
{
.name = "NFQUEUE",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
{
.name = "NFQUEUE",
.revision = 2,
.family = NFPROTO_UNSPEC,
.checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v2,
.targetsize = sizeof(struct xt_NFQ_info_v2),
.me = THIS_MODULE,
},
{
.name = "NFQUEUE",
.revision = 3,
.family = NFPROTO_UNSPEC,
.checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v3,
.targetsize = sizeof(struct xt_NFQ_info_v3),
.me = THIS_MODULE,
},
};
static int __init nfqueue_tg_init(void)
{
return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}
static void __exit nfqueue_tg_exit(void)
{
xt_unregister_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}
module_init(nfqueue_tg_init);
module_exit(nfqueue_tg_exit);
| linux-master | net/netfilter/xt_NFQUEUE.c |
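nfqueue_tg_v3() above spreads packets across a contiguous queue range either by flow hash or, with NFQ_FLAG_CPU_FANOUT, by CPU id; checkentry rejects ranges whose highest id would overflow the 16-bit queue-number space. The same arithmetic as a standalone sketch (function names are mine, not kernel API):
#include <stdint.h>
/* Queue ids run from queuenum to queuenum + queues_total - 1. */
static int queue_range_valid(uint32_t queuenum, uint32_t queues_total)
{
	return queues_total != 0 &&
	       queues_total - 1 + queuenum <= 0xffff;
}
static uint32_t pick_queue_cpu_fanout(uint32_t queuenum,
				      uint32_t queues_total, uint32_t cpu)
{
	return queuenum + cpu % queues_total;	/* queues_total != 0 */
}
/* queuenum=100, queues_total=4: CPUs 0..5 pick 100,101,102,103,100,101 */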
// SPDX-License-Identifier: GPL-2.0-only
/*
* Transparent proxy support for Linux/iptables
*
* Copyright (c) 2006-2010 BalaBit IT Ltd.
* Author: Balazs Scheidler, Krisztian Kovacs
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/inet_sock.h>
#include <net/inet_hashtables.h>
#include <linux/inetdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <net/inet6_hashtables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
#include <net/netfilter/nf_tproxy.h>
#include <linux/netfilter/xt_TPROXY.h>
static unsigned int
tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
{
const struct iphdr *iph = ip_hdr(skb);
struct udphdr _hdr, *hp;
struct sock *sk;
hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
if (hp == NULL)
return NF_DROP;
/* check if there's an ongoing connection on the packet
* addresses, this happens if the redirect already happened
* and the current packet belongs to an already established
* connection */
sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, iph->daddr,
hp->source, hp->dest,
skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
laddr = nf_tproxy_laddr4(skb, laddr, iph->daddr);
if (!lport)
lport = hp->dest;
/* UDP has no TCP_TIME_WAIT state, so we never enter here */
if (sk && sk->sk_state == TCP_TIME_WAIT)
/* reopening a TIME_WAIT connection needs special handling */
sk = nf_tproxy_handle_time_wait4(net, skb, laddr, lport, sk);
else if (!sk)
/* no, there's no established connection, check if
* there's a listener on the redirected addr/port */
sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, laddr,
hp->source, lport,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
/* NOTE: assign_sock consumes our sk reference */
if (sk && nf_tproxy_sk_is_transparent(sk)) {
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
return NF_DROP;
}
static unsigned int
tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tproxy_target_info *tgi = par->targinfo;
return tproxy_tg4(xt_net(par), skb, tgi->laddr, tgi->lport,
tgi->mark_mask, tgi->mark_value);
}
static unsigned int
tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
return tproxy_tg4(xt_net(par), skb, tgi->laddr.ip, tgi->lport,
tgi->mark_mask, tgi->mark_value);
}
#ifdef XT_TPROXY_HAVE_IPV6
static unsigned int
tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
struct udphdr _hdr, *hp;
struct sock *sk;
const struct in6_addr *laddr;
__be16 lport;
int thoff = 0;
int tproto;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0)
return NF_DROP;
hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
if (!hp)
return NF_DROP;
/* check if there's an ongoing connection on the packet
* addresses, this happens if the redirect already happened
* and the current packet belongs to an already established
* connection */
sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
&iph->saddr, &iph->daddr,
hp->source, hp->dest,
xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
laddr = nf_tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr);
lport = tgi->lport ? tgi->lport : hp->dest;
/* UDP has no TCP_TIME_WAIT state, so we never enter here */
if (sk && sk->sk_state == TCP_TIME_WAIT) {
const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
/* reopening a TIME_WAIT connection needs special handling */
sk = nf_tproxy_handle_time_wait6(skb, tproto, thoff,
xt_net(par),
&tgi->laddr.in6,
tgi->lport,
sk);
}
else if (!sk)
/* no, there's no established connection, check if
* there's a listener on the redirected addr/port */
sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
tproto, &iph->saddr, laddr,
hp->source, lport,
xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
/* NOTE: assign_sock consumes our sk reference */
if (sk && nf_tproxy_sk_is_transparent(sk)) {
/* This should be in a separate target, but we don't do multiple
targets on the same rule yet */
skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
nf_tproxy_assign_sock(skb, sk);
return NF_ACCEPT;
}
return NF_DROP;
}
static int tproxy_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_ip6 *i = par->entryinfo;
int err;
err = nf_defrag_ipv6_enable(par->net);
if (err)
return err;
if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) &&
!(i->invflags & IP6T_INV_PROTO))
return 0;
pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
return -EINVAL;
}
static void tproxy_tg6_destroy(const struct xt_tgdtor_param *par)
{
nf_defrag_ipv6_disable(par->net);
}
#endif
static int tproxy_tg4_check(const struct xt_tgchk_param *par)
{
const struct ipt_ip *i = par->entryinfo;
int err;
err = nf_defrag_ipv4_enable(par->net);
if (err)
return err;
if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
&& !(i->invflags & IPT_INV_PROTO))
return 0;
pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
return -EINVAL;
}
static void tproxy_tg4_destroy(const struct xt_tgdtor_param *par)
{
nf_defrag_ipv4_disable(par->net);
}
static struct xt_target tproxy_tg_reg[] __read_mostly = {
{
.name = "TPROXY",
.family = NFPROTO_IPV4,
.table = "mangle",
.target = tproxy_tg4_v0,
.revision = 0,
.targetsize = sizeof(struct xt_tproxy_target_info),
.checkentry = tproxy_tg4_check,
.destroy = tproxy_tg4_destroy,
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
{
.name = "TPROXY",
.family = NFPROTO_IPV4,
.table = "mangle",
.target = tproxy_tg4_v1,
.revision = 1,
.targetsize = sizeof(struct xt_tproxy_target_info_v1),
.checkentry = tproxy_tg4_check,
.destroy = tproxy_tg4_destroy,
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
#ifdef XT_TPROXY_HAVE_IPV6
{
.name = "TPROXY",
.family = NFPROTO_IPV6,
.table = "mangle",
.target = tproxy_tg6_v1,
.revision = 1,
.targetsize = sizeof(struct xt_tproxy_target_info_v1),
.checkentry = tproxy_tg6_check,
.destroy = tproxy_tg6_destroy,
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
#endif
};
static int __init tproxy_tg_init(void)
{
return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg));
}
static void __exit tproxy_tg_exit(void)
{
xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg));
}
module_init(tproxy_tg_init);
module_exit(tproxy_tg_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs");
MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module.");
MODULE_ALIAS("ipt_TPROXY");
MODULE_ALIAS("ip6t_TPROXY");
| linux-master | net/netfilter/xt_TPROXY.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_mark - Netfilter module to match NFMARK value
*
* (C) 1999-2001 Marc Boucher <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
* Jan Engelhardt <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/xt_mark.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet mark operations");
MODULE_ALIAS("ipt_mark");
MODULE_ALIAS("ip6t_mark");
MODULE_ALIAS("ipt_MARK");
MODULE_ALIAS("ip6t_MARK");
MODULE_ALIAS("arpt_MARK");
static unsigned int
mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_mark_tginfo2 *info = par->targinfo;
skb->mark = (skb->mark & ~info->mask) ^ info->mark;
return XT_CONTINUE;
}
static bool
mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_mark_mtinfo1 *info = par->matchinfo;
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
static struct xt_target mark_tg_reg __read_mostly = {
.name = "MARK",
.revision = 2,
.family = NFPROTO_UNSPEC,
.target = mark_tg,
.targetsize = sizeof(struct xt_mark_tginfo2),
.me = THIS_MODULE,
};
static struct xt_match mark_mt_reg __read_mostly = {
.name = "mark",
.revision = 1,
.family = NFPROTO_UNSPEC,
.match = mark_mt,
.matchsize = sizeof(struct xt_mark_mtinfo1),
.me = THIS_MODULE,
};
static int __init mark_mt_init(void)
{
int ret;
ret = xt_register_target(&mark_tg_reg);
if (ret < 0)
return ret;
ret = xt_register_match(&mark_mt_reg);
if (ret < 0) {
xt_unregister_target(&mark_tg_reg);
return ret;
}
return 0;
}
static void __exit mark_mt_exit(void)
{
xt_unregister_match(&mark_mt_reg);
xt_unregister_target(&mark_tg_reg);
}
module_init(mark_mt_init);
module_exit(mark_mt_exit);
| linux-master | net/netfilter/xt_mark.c |
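Both the MARK target above and xt_TPROXY apply the same mask/xor update rule to skb->mark. A standalone sketch of its behaviour:
#include <stdint.h>
/* skb->mark update used by mark_tg(): keep the bits outside 'mask',
 * then XOR in 'value'. With mask == value this sets those bits, with
 * value == 0 it clears the masked bits, and with mask == 0 it toggles
 * 'value' into the existing mark.
 */
static uint32_t mark_apply(uint32_t mark, uint32_t mask, uint32_t value)
{
	return (mark & ~mask) ^ value;
}
/* mark_apply(0x0000ff00, 0xff, 0x2a) == 0x0000ff2a */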
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/module.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tproxy.h>
#include <net/inet_sock.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
struct nft_tproxy {
u8 sreg_addr;
u8 sreg_port;
u8 family;
};
static void nft_tproxy_eval_v4(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_tproxy *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
const struct iphdr *iph = ip_hdr(skb);
struct udphdr _hdr, *hp;
__be32 taddr = 0;
__be16 tport = 0;
struct sock *sk;
if (pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) {
regs->verdict.code = NFT_BREAK;
return;
}
hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
if (!hp) {
regs->verdict.code = NFT_BREAK;
return;
}
/* check if there's an ongoing connection on the packet addresses, this
* happens if the redirect already happened and the current packet
* belongs to an already established connection
*/
sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
iph->saddr, iph->daddr,
hp->source, hp->dest,
skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
if (priv->sreg_addr)
taddr = nft_reg_load_be32(&regs->data[priv->sreg_addr]);
taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
if (priv->sreg_port)
tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
/* UDP has no TCP_TIME_WAIT state, so we never enter here */
if (sk && sk->sk_state == TCP_TIME_WAIT) {
/* reopening a TIME_WAIT connection needs special handling */
sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
} else if (!sk) {
/* no, there's no established connection, check if
* there's a listener on the redirected addr/port
*/
sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
iph->saddr, taddr,
hp->source, tport,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
}
if (sk && nf_tproxy_sk_is_transparent(sk))
nf_tproxy_assign_sock(skb, sk);
else
regs->verdict.code = NFT_BREAK;
}
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
static void nft_tproxy_eval_v6(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_tproxy *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
const struct ipv6hdr *iph = ipv6_hdr(skb);
int thoff = nft_thoff(pkt);
struct udphdr _hdr, *hp;
struct in6_addr taddr;
__be16 tport = 0;
struct sock *sk;
int l4proto;
memset(&taddr, 0, sizeof(taddr));
if (pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) {
regs->verdict.code = NFT_BREAK;
return;
}
l4proto = pkt->tprot;
hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
if (hp == NULL) {
regs->verdict.code = NFT_BREAK;
return;
}
/* check if there's an ongoing connection on the packet addresses, this
* happens if the redirect already happened and the current packet
* belongs to an already established connection
*/
sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto,
&iph->saddr, &iph->daddr,
hp->source, hp->dest,
nft_in(pkt), NF_TPROXY_LOOKUP_ESTABLISHED);
if (priv->sreg_addr)
memcpy(&taddr, &regs->data[priv->sreg_addr], sizeof(taddr));
taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
if (priv->sreg_port)
tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
/* UDP has no TCP_TIME_WAIT state, so we never enter here */
if (sk && sk->sk_state == TCP_TIME_WAIT) {
/* reopening a TIME_WAIT connection needs special handling */
sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff,
nft_net(pkt),
&taddr,
tport,
sk);
} else if (!sk) {
/* no, there's no established connection, check if
* there's a listener on the redirected addr/port
*/
sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff,
l4proto, &iph->saddr, &taddr,
hp->source, tport,
nft_in(pkt), NF_TPROXY_LOOKUP_LISTENER);
}
/* NOTE: assign_sock consumes our sk reference */
if (sk && nf_tproxy_sk_is_transparent(sk))
nf_tproxy_assign_sock(skb, sk);
else
regs->verdict.code = NFT_BREAK;
}
#endif
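/* Dispatch on the packet's actual family; an inet-family rule leaves
 * priv->family as NFPROTO_UNSPEC and accepts both IPv4 and IPv6.
 */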
static void nft_tproxy_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_tproxy *priv = nft_expr_priv(expr);
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
switch (priv->family) {
case NFPROTO_IPV4:
case NFPROTO_UNSPEC:
nft_tproxy_eval_v4(expr, regs, pkt);
return;
}
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
switch (priv->family) {
case NFPROTO_IPV6:
case NFPROTO_UNSPEC:
nft_tproxy_eval_v6(expr, regs, pkt);
return;
}
#endif
}
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_tproxy_policy[NFTA_TPROXY_MAX + 1] = {
[NFTA_TPROXY_FAMILY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_TPROXY_REG_ADDR] = { .type = NLA_U32 },
[NFTA_TPROXY_REG_PORT] = { .type = NLA_U32 },
};
static int nft_tproxy_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_tproxy *priv = nft_expr_priv(expr);
unsigned int alen = 0;
int err;
if (!tb[NFTA_TPROXY_FAMILY] ||
(!tb[NFTA_TPROXY_REG_ADDR] && !tb[NFTA_TPROXY_REG_PORT]))
return -EINVAL;
priv->family = ntohl(nla_get_be32(tb[NFTA_TPROXY_FAMILY]));
switch (ctx->family) {
case NFPROTO_IPV4:
if (priv->family != NFPROTO_IPV4)
return -EINVAL;
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
if (priv->family != NFPROTO_IPV6)
return -EINVAL;
break;
#endif
case NFPROTO_INET:
break;
default:
return -EOPNOTSUPP;
}
/* Address is specified but the rule family is not set accordingly */
if (priv->family == NFPROTO_UNSPEC && tb[NFTA_TPROXY_REG_ADDR])
return -EINVAL;
switch (priv->family) {
case NFPROTO_IPV4:
alen = sizeof_field(union nf_inet_addr, in);
err = nf_defrag_ipv4_enable(ctx->net);
if (err)
return err;
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
alen = sizeof_field(union nf_inet_addr, in6);
err = nf_defrag_ipv6_enable(ctx->net);
if (err)
return err;
break;
#endif
case NFPROTO_UNSPEC:
/* No address is specified here */
err = nf_defrag_ipv4_enable(ctx->net);
if (err)
return err;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
err = nf_defrag_ipv6_enable(ctx->net);
if (err)
return err;
#endif
break;
default:
return -EOPNOTSUPP;
}
if (tb[NFTA_TPROXY_REG_ADDR]) {
err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR],
&priv->sreg_addr, alen);
if (err < 0)
return err;
}
if (tb[NFTA_TPROXY_REG_PORT]) {
err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT],
&priv->sreg_port, sizeof(u16));
if (err < 0)
return err;
}
return 0;
}
static void nft_tproxy_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_tproxy *priv = nft_expr_priv(expr);
switch (priv->family) {
case NFPROTO_IPV4:
nf_defrag_ipv4_disable(ctx->net);
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
nf_defrag_ipv6_disable(ctx->net);
break;
#endif
case NFPROTO_UNSPEC:
nf_defrag_ipv4_disable(ctx->net);
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
nf_defrag_ipv6_disable(ctx->net);
#endif
break;
}
}
static int nft_tproxy_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_tproxy *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_TPROXY_FAMILY, htonl(priv->family)))
return -1;
if (priv->sreg_addr &&
nft_dump_register(skb, NFTA_TPROXY_REG_ADDR, priv->sreg_addr))
return -1;
if (priv->sreg_port &&
nft_dump_register(skb, NFTA_TPROXY_REG_PORT, priv->sreg_port))
return -1;
return 0;
}
static int nft_tproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
}
static struct nft_expr_type nft_tproxy_type;
static const struct nft_expr_ops nft_tproxy_ops = {
.type = &nft_tproxy_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_tproxy)),
.eval = nft_tproxy_eval,
.init = nft_tproxy_init,
.destroy = nft_tproxy_destroy,
.dump = nft_tproxy_dump,
.reduce = NFT_REDUCE_READONLY,
.validate = nft_tproxy_validate,
};
static struct nft_expr_type nft_tproxy_type __read_mostly = {
.name = "tproxy",
.ops = &nft_tproxy_ops,
.policy = nft_tproxy_policy,
.maxattr = NFTA_TPROXY_MAX,
.owner = THIS_MODULE,
};
static int __init nft_tproxy_module_init(void)
{
return nft_register_expr(&nft_tproxy_type);
}
static void __exit nft_tproxy_module_exit(void)
{
nft_unregister_expr(&nft_tproxy_type);
}
module_init(nft_tproxy_module_init);
module_exit(nft_tproxy_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Máté Eckl");
MODULE_DESCRIPTION("nf_tables tproxy support module");
MODULE_ALIAS_NFT_EXPR("tproxy");
| linux-master | net/netfilter/nft_tproxy.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
* (C) 2006-2010 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/icmp.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct icmphdr *hp;
struct icmphdr _hdr;
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return false;
tuple->dst.u.icmp.type = hp->type;
tuple->src.u.icmp.id = hp->un.echo.id;
tuple->dst.u.icmp.code = hp->code;
return true;
}
/* Add 1; spaces filled with 0. */
static const u_int8_t invmap[] = {
[ICMP_ECHO] = ICMP_ECHOREPLY + 1,
[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
[ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
[ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
[ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
[ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
[ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};
bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
if (orig->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[orig->dst.u.icmp.type])
return false;
tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return true;
}
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_icmp_packet(struct nf_conn *ct,
struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
unsigned int *timeout = nf_ct_timeout_lookup(ct);
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
[ICMP_TIMESTAMP] = 1,
[ICMP_INFO_REQUEST] = 1,
[ICMP_ADDRESS] = 1
};
if (state->pf != NFPROTO_IPV4)
return -NF_ACCEPT;
if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) ||
!valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) {
/* Can't create a new ICMP `conn' with this. */
pr_debug("icmp: can't create new conn with type %u\n",
ct->tuplehash[0].tuple.dst.u.icmp.type);
nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
return -NF_ACCEPT;
}
if (!timeout)
timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Check inner header is related to any of the existing connections */
int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state,
u8 l4proto, union nf_inet_addr *outer_daddr)
{
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_zone *zone;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
union nf_inet_addr *ct_daddr;
enum ip_conntrack_dir dir;
struct nf_conn *ct;
WARN_ON(skb_nfct(skb));
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuplepr(skb, dataoff,
state->pf, state->net, &origtuple))
return -NF_ACCEPT;
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
if (!nf_ct_invert_tuple(&innertuple, &origtuple))
return -NF_ACCEPT;
h = nf_conntrack_find_get(state->net, zone, &innertuple);
if (!h)
return -NF_ACCEPT;
/* Consider: A -> T (=This machine) -> B
* Conntrack entry will look like this:
* Original: A->B
* Reply: B->T (SNAT case) OR B->A (no NAT)
*
* When this function runs, we got packet that looks like this:
* iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..).
*
* Above nf_conntrack_find_get() makes lookup based on inner_hdr,
* so we should expect that destination of the found connection
* matches outer header destination address.
*
* In above example, we can consider these two cases:
* 1. Error coming in reply direction from B or M (middle box) to
* T (SNAT case) or A.
* Inner saddr will be B, dst will be T or A.
* The found conntrack will be reply tuple (B->T/A).
* 2. Error coming in original direction from A or M to B.
* Inner saddr will be A, inner daddr will be B.
* The found conntrack will be original tuple (A->B).
*
* In both cases, conntrack[dir].dst == inner.dst.
*
* A bogus packet could look like this:
* Inner: B->T
* Outer: B->X (other machine reachable by T).
*
* In this case, lookup yields connection A->B and will
* set packet from B->X as *RELATED*, even though no connection
* from X was ever seen.
*/
ct = nf_ct_tuplehash_to_ctrack(h);
dir = NF_CT_DIRECTION(h);
ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
if (state->pf == AF_INET) {
nf_l4proto_log_invalid(skb, state,
l4proto,
"outer daddr %pI4 != inner %pI4",
&outer_daddr->ip, &ct_daddr->ip);
} else if (state->pf == AF_INET6) {
nf_l4proto_log_invalid(skb, state,
l4proto,
"outer daddr %pI6 != inner %pI6",
&outer_daddr->ip6, &ct_daddr->ip6);
}
nf_ct_put(ct);
return -NF_ACCEPT;
}
ctinfo = IP_CT_RELATED;
if (dir == IP_CT_DIR_REPLY)
ctinfo += IP_CT_IS_REPLY;
/* Update skb to refer to this connection */
nf_ct_set(skb, ct, ctinfo);
return NF_ACCEPT;
}
static void icmp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg);
}
/* Small and modified version of icmp_rcv */
int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
const struct nf_hook_state *state)
{
union nf_inet_addr outer_daddr;
const struct icmphdr *icmph;
struct icmphdr _ih;
/* Not enough header? */
icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmph == NULL) {
icmp_error_log(skb, state, "short packet");
return -NF_ACCEPT;
}
/* See nf_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT;
}
/*
* 18 is the highest 'known' ICMP type. Anything else is a mystery
*
* RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES) {
icmp_error_log(skb, state, "invalid icmp type");
return -NF_ACCEPT;
}
/* Need to track icmp error message? */
if (!icmp_is_err(icmph->type))
return NF_ACCEPT;
memset(&outer_daddr, 0, sizeof(outer_daddr));
outer_daddr.ip = ip_hdr(skb)->daddr;
dataoff += sizeof(*icmph);
return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
IPPROTO_ICMP, &outer_daddr);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int icmp_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *t)
{
if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static const struct nla_policy icmp_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_ICMP_TYPE] = { .type = NLA_U8 },
[CTA_PROTO_ICMP_CODE] = { .type = NLA_U8 },
[CTA_PROTO_ICMP_ID] = { .type = NLA_U16 },
};
static int icmp_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) {
if (!tb[CTA_PROTO_ICMP_TYPE])
return -EINVAL;
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]);
if (tuple->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type])
return -EINVAL;
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) {
if (!tb[CTA_PROTO_ICMP_CODE])
return -EINVAL;
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) {
if (!tb[CTA_PROTO_ICMP_ID])
return -EINVAL;
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]);
}
return 0;
}
static unsigned int icmp_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
struct nf_icmp_net *in = nf_icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
if (!timeout)
timeout = &in->timeout;
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
} else if (timeout) {
/* Set default ICMP timeout. */
*timeout = in->timeout;
}
return 0;
}
static int
icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
[CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_icmp_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmp_pernet(net);
in->timeout = nf_ct_icmp_timeout;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
{
.l4proto = IPPROTO_ICMP,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
.nlattr_tuple_size = icmp_nlattr_tuple_size,
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmp_timeout_nlattr_to_obj,
.obj_to_nlattr = icmp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_ICMP_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_icmp.c |
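The invmap[] table in the file above stores each inverse ICMP type plus one, so that zero-initialized slots read as "no inverse exists". A small userspace model of that lookup convention (function name is mine):
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
static bool icmp_invert_type(const uint8_t *invmap, size_t len,
			     uint8_t type, uint8_t *inverse)
{
	if (type >= len || !invmap[type])
		return false;		/* not an invertible request/reply */
	*inverse = invmap[type] - 1;	/* undo the "+ 1" encoding */
	return true;
}
/* With the kernel's table, ICMP_ECHO (8) inverts to ICMP_ECHOREPLY (0);
 * the "+ 1" trick keeps that legitimate 0 distinguishable from an empty
 * slot.
 */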
// SPDX-License-Identifier: GPL-2.0-or-later
/* Amanda extension for IP connection tracking
*
* (C) 2002 by Brian J. Murrell <[email protected]>
* based on HW's ip_conntrack_irc.c as well as other modules
* (C) 2006 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/textsearch.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/netfilter.h>
#include <linux/gfp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_amanda.h>
static unsigned int master_timeout __read_mostly = 300;
static char *ts_algo = "kmp";
#define HELPER_NAME "amanda"
MODULE_AUTHOR("Brian J. Murrell <[email protected]>");
MODULE_DESCRIPTION("Amanda connection tracking module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_amanda");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
module_param(master_timeout, uint, 0600);
MODULE_PARM_DESC(master_timeout, "timeout for the master connection");
module_param(ts_algo, charp, 0400);
MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
__read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_amanda_hook);
enum amanda_strings {
SEARCH_CONNECT,
SEARCH_NEWLINE,
SEARCH_DATA,
SEARCH_MESG,
SEARCH_INDEX,
SEARCH_STATE,
};
static struct {
const char *string;
size_t len;
struct ts_config *ts;
} search[] __read_mostly = {
[SEARCH_CONNECT] = {
.string = "CONNECT ",
.len = 8,
},
[SEARCH_NEWLINE] = {
.string = "\n",
.len = 1,
},
[SEARCH_DATA] = {
.string = "DATA ",
.len = 5,
},
[SEARCH_MESG] = {
.string = "MESG ",
.len = 5,
},
[SEARCH_INDEX] = {
.string = "INDEX ",
.len = 6,
},
[SEARCH_STATE] = {
.string = "STATE ",
.len = 6,
},
};
static int amanda_help(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
unsigned int dataoff, start, stop, off, i;
char pbuf[sizeof("65535")], *tmp;
u_int16_t len;
__be16 port;
int ret = NF_ACCEPT;
typeof(nf_nat_amanda_hook) nf_nat_amanda;
/* Only look at packets from the Amanda server */
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
/* increase the UDP timeout of the master connection as replies from
* Amanda clients to the server can be quite delayed */
nf_ct_refresh(ct, skb, master_timeout * HZ);
/* No data? */
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len) {
net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
return NF_ACCEPT;
}
start = skb_find_text(skb, dataoff, skb->len,
search[SEARCH_CONNECT].ts);
if (start == UINT_MAX)
goto out;
start += dataoff + search[SEARCH_CONNECT].len;
stop = skb_find_text(skb, start, skb->len,
search[SEARCH_NEWLINE].ts);
if (stop == UINT_MAX)
goto out;
stop += start;
for (i = SEARCH_DATA; i <= SEARCH_STATE; i++) {
off = skb_find_text(skb, start, stop, search[i].ts);
if (off == UINT_MAX)
continue;
off += start + search[i].len;
len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);
if (skb_copy_bits(skb, off, pbuf, len))
break;
pbuf[len] = '\0';
port = htons(simple_strtoul(pbuf, &tmp, 10));
len = tmp - pbuf;
if (port == 0 || len > 5)
break;
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
ret = NF_DROP;
goto out;
}
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_TCP, NULL, &port);
nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
ret = nf_nat_amanda(skb, ctinfo, protoff,
off - dataoff, len, exp);
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}
nf_ct_expect_put(exp);
}
out:
return ret;
}
static const struct nf_conntrack_expect_policy amanda_exp_policy = {
.max_expected = 4,
.timeout = 180,
};
static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
{
.name = HELPER_NAME,
.me = THIS_MODULE,
.help = amanda_help,
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(10080),
.tuple.dst.protonum = IPPROTO_UDP,
.expect_policy = &amanda_exp_policy,
.nat_mod_name = NF_NAT_HELPER_NAME(HELPER_NAME),
},
{
.name = "amanda",
.me = THIS_MODULE,
.help = amanda_help,
.tuple.src.l3num = AF_INET6,
.tuple.src.u.udp.port = cpu_to_be16(10080),
.tuple.dst.protonum = IPPROTO_UDP,
.expect_policy = &amanda_exp_policy,
.nat_mod_name = NF_NAT_HELPER_NAME(HELPER_NAME),
},
};
static void __exit nf_conntrack_amanda_fini(void)
{
int i;
nf_conntrack_helpers_unregister(amanda_helper,
ARRAY_SIZE(amanda_helper));
for (i = 0; i < ARRAY_SIZE(search); i++)
textsearch_destroy(search[i].ts);
}
static int __init nf_conntrack_amanda_init(void)
{
int ret, i;
NF_CT_HELPER_BUILD_BUG_ON(0);
for (i = 0; i < ARRAY_SIZE(search); i++) {
search[i].ts = textsearch_prepare(ts_algo, search[i].string,
search[i].len,
GFP_KERNEL, TS_AUTOLOAD);
if (IS_ERR(search[i].ts)) {
ret = PTR_ERR(search[i].ts);
goto err1;
}
}
ret = nf_conntrack_helpers_register(amanda_helper,
ARRAY_SIZE(amanda_helper));
if (ret < 0)
goto err1;
return 0;
err1:
while (--i >= 0)
textsearch_destroy(search[i].ts);
return ret;
}
module_init(nf_conntrack_amanda_init);
module_exit(nf_conntrack_amanda_fini);
| linux-master | net/netfilter/nf_conntrack_amanda.c |
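amanda_help() above extracts a decimal TCP port from the control message by copying at most five digits into a small stack buffer before converting. A hedged userspace sketch of that bounded parse (helper name and the explicit 65535 bound are mine; the kernel variant truncates via htons() instead):
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
/* Returns the port, or 0 if the field is absent, zero, or too long --
 * mirroring the "port == 0 || len > 5" rejection in amanda_help().
 */
static uint16_t parse_port(const char *data, size_t avail)
{
	char pbuf[sizeof("65535")];	/* room for 5 digits plus NUL */
	size_t len = avail < sizeof(pbuf) - 1 ? avail : sizeof(pbuf) - 1;
	unsigned long port;
	char *end;
	memcpy(pbuf, data, len);
	pbuf[len] = '\0';
	port = strtoul(pbuf, &end, 10);
	if (port == 0 || port > 65535 || (size_t)(end - pbuf) > 5)
		return 0;
	return (uint16_t)port;
}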
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2012 by Pablo Neira Ayuso <[email protected]>
* (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_hook);
static int untimeout(struct nf_conn *ct, void *timeout)
{
struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext) {
const struct nf_ct_timeout *t;
t = rcu_access_pointer(timeout_ext->timeout);
if (!timeout || t == timeout)
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
}
/* We are not intended to delete this conntrack. */
return 0;
}
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout)
{
struct nf_ct_iter_data iter_data = {
.net = net,
.data = timeout,
};
nf_ct_iterate_cleanup_net(untimeout, &iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_untimeout);
static void __nf_ct_timeout_put(struct nf_ct_timeout *timeout)
{
const struct nf_ct_timeout_hooks *h = rcu_dereference(nf_ct_timeout_hook);
if (h)
h->timeout_put(timeout);
}
int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
u8 l3num, u8 l4num, const char *timeout_name)
{
const struct nf_ct_timeout_hooks *h;
struct nf_ct_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
const char *errmsg = NULL;
int ret = 0;
rcu_read_lock();
h = rcu_dereference(nf_ct_timeout_hook);
if (!h) {
ret = -ENOENT;
errmsg = "Timeout policy base is empty";
goto out;
}
timeout = h->timeout_find_get(net, timeout_name);
if (!timeout) {
ret = -ENOENT;
pr_info_ratelimited("No such timeout policy \"%s\"\n",
timeout_name);
goto out;
}
if (timeout->l3num != l3num) {
ret = -EINVAL;
pr_info_ratelimited("Timeout policy `%s' can only be used by "
"L%d protocol number %d\n",
timeout_name, 3, timeout->l3num);
goto err_put_timeout;
}
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
if (timeout->l4proto->l4proto != l4num) {
ret = -EINVAL;
pr_info_ratelimited("Timeout policy `%s' can only be used by "
"L%d protocol number %d\n",
timeout_name, 4, timeout->l4proto->l4proto);
goto err_put_timeout;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
if (!timeout_ext) {
ret = -ENOMEM;
goto err_put_timeout;
}
rcu_read_unlock();
return ret;
err_put_timeout:
__nf_ct_timeout_put(timeout);
out:
rcu_read_unlock();
if (errmsg)
pr_info_ratelimited("%s\n", errmsg);
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_set_timeout);
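/* Illustrative sketch (hypothetical caller, not part of this file): a
 * ruleset target that attaches a named timeout policy to a template
 * conntrack would do roughly:
 *
 *	err = nf_ct_set_timeout(net, ct, family, l4proto, "my-policy");
 *	if (err < 0)
 *		return err;
 *
 * The extension is added with GFP_ATOMIC above, so this is safe to call
 * from packet-processing context.
 */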
void nf_ct_destroy_timeout(struct nf_conn *ct)
{
struct nf_conn_timeout *timeout_ext;
const struct nf_ct_timeout_hooks *h;
rcu_read_lock();
h = rcu_dereference(nf_ct_timeout_hook);
if (h) {
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext) {
struct nf_ct_timeout *t;
t = rcu_dereference(timeout_ext->timeout);
if (t)
h->timeout_put(t);
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
}
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_destroy_timeout);
| linux-master | net/netfilter/nf_conntrack_timeout.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Helper handling for netfilter. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_log.h>
#include <net/ip.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
struct hlist_head *nf_ct_helper_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
unsigned int nf_ct_helper_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
static unsigned int nf_ct_helper_count __read_mostly;
static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
static struct list_head nf_ct_nat_helpers __read_mostly;
/* Stupid hash, but collision free for the default registrations of the
* helpers currently in the kernel. */
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
{
return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^
(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
}
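/* Example inputs (for illustration): a UDP/IPv4 helper bound to port
 * 10080, like amanda above, hashes ((2 << 8) | 17) XORed with the
 * network-byte-order port, reduced modulo nf_ct_helper_hsize. The
 * distinct l3num/protonum/port triples of the in-tree helpers therefore
 * land in distinct buckets, which is all this hash promises.
 */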
struct nf_conntrack_helper *
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
struct nf_conntrack_helper *h;
unsigned int i;
for (i = 0; i < nf_ct_helper_hsize; i++) {
hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
if (strcmp(h->name, name))
continue;
if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
h->tuple.src.l3num != l3num)
continue;
if (h->tuple.dst.protonum == protonum)
return h;
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
{
struct nf_conntrack_helper *h;
rcu_read_lock();
h = __nf_conntrack_helper_find(name, l3num, protonum);
#ifdef CONFIG_MODULES
if (h == NULL) {
rcu_read_unlock();
if (request_module("nfct-helper-%s", name) == 0) {
rcu_read_lock();
h = __nf_conntrack_helper_find(name, l3num, protonum);
} else {
return h;
}
}
#endif
if (h != NULL && !try_module_get(h->me))
h = NULL;
if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
module_put(h->me);
h = NULL;
}
rcu_read_unlock();
return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
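/* The rcu_read_unlock()/rcu_read_lock() dance above exists because
 * request_module() may sleep, which is not allowed under an RCU read
 * lock; after reacquiring the lock the helper must be looked up again,
 * since the module load (or anything else) may have changed the table
 * in between.
 */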
void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
{
refcount_dec(&helper->refcnt);
module_put(helper->me);
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
static struct nf_conntrack_nat_helper *
nf_conntrack_nat_helper_find(const char *mod_name)
{
struct nf_conntrack_nat_helper *cur;
bool found = false;
list_for_each_entry_rcu(cur, &nf_ct_nat_helpers, list) {
if (!strcmp(cur->mod_name, mod_name)) {
found = true;
break;
}
}
return found ? cur : NULL;
}
int
nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
{
struct nf_conntrack_helper *h;
struct nf_conntrack_nat_helper *nat;
char mod_name[NF_CT_HELPER_NAME_LEN];
int ret = 0;
rcu_read_lock();
h = __nf_conntrack_helper_find(name, l3num, protonum);
if (!h) {
rcu_read_unlock();
return -ENOENT;
}
nat = nf_conntrack_nat_helper_find(h->nat_mod_name);
if (!nat) {
snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name);
rcu_read_unlock();
request_module("%s", mod_name);
rcu_read_lock();
nat = nf_conntrack_nat_helper_find(mod_name);
if (!nat) {
rcu_read_unlock();
return -ENOENT;
}
}
if (!try_module_get(nat->module))
ret = -ENOENT;
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_helper_try_module_get);
void nf_nat_helper_put(struct nf_conntrack_helper *helper)
{
struct nf_conntrack_nat_helper *nat;
nat = nf_conntrack_nat_helper_find(helper->nat_mod_name);
if (WARN_ON_ONCE(!nat))
return;
module_put(nat->module);
}
EXPORT_SYMBOL_GPL(nf_nat_helper_put);
struct nf_conn_help *
nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
{
struct nf_conn_help *help;
help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
if (help)
INIT_HLIST_HEAD(&help->expectations);
else
pr_debug("failed to add helper extension area");
return help;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags)
{
struct nf_conntrack_helper *helper = NULL;
struct nf_conn_help *help;
/* We already got a helper explicitly attached. The function
* nf_conntrack_alter_reply - in case NAT is in use - asks for looking
* the helper up again. Since now the user is in full control of
* making consistent helper configurations, skip this automatic
* re-lookup, otherwise we'll lose the helper.
*/
if (test_bit(IPS_HELPER_BIT, &ct->status))
return 0;
if (WARN_ON_ONCE(!tmpl))
return 0;
help = nfct_help(tmpl);
if (help != NULL) {
helper = rcu_dereference(help->helper);
set_bit(IPS_HELPER_BIT, &ct->status);
}
help = nfct_help(ct);
if (helper == NULL) {
if (help)
RCU_INIT_POINTER(help->helper, NULL);
return 0;
}
if (help == NULL) {
help = nf_ct_helper_ext_add(ct, flags);
if (help == NULL)
return -ENOMEM;
} else {
/* We only allow helper re-assignment of the same sort since
* we cannot reallocate the helper extension area.
*/
struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
if (tmp && tmp->help != helper->help) {
RCU_INIT_POINTER(help->helper, NULL);
return 0;
}
}
rcu_assign_pointer(help->helper, helper);
return 0;
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
/* appropriate ct lock protecting must be taken by caller */
static int unhelp(struct nf_conn *ct, void *me)
{
struct nf_conn_help *help = nfct_help(ct);
if (help && rcu_dereference_raw(help->helper) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
RCU_INIT_POINTER(help->helper, NULL);
}
	/* We do not want to delete this conntrack; returning 0 keeps it. */
return 0;
}
void nf_ct_helper_destroy(struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_helper *helper;
if (help) {
rcu_read_lock();
helper = rcu_dereference(help->helper);
if (helper && helper->destroy)
helper->destroy(ct);
rcu_read_unlock();
}
}
static LIST_HEAD(nf_ct_helper_expectfn_list);
void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
{
spin_lock_bh(&nf_conntrack_expect_lock);
list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
{
spin_lock_bh(&nf_conntrack_expect_lock);
list_del_rcu(&n->head);
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
/* Caller should hold the rcu lock */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_name(const char *name)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (!strcmp(cur->name, name)) {
found = true;
break;
}
}
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
/* Caller should hold the rcu lock */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (cur->expectfn == symbol) {
found = true;
break;
}
}
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
__printf(3, 4)
void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
const char *fmt, ...)
{
const struct nf_conn_help *help;
const struct nf_conntrack_helper *helper;
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
/* Called from the helper function, this call never fails */
help = nfct_help(ct);
/* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
"nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_log);
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
struct nf_conntrack_helper *cur;
int ret = 0, i;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
if (!nf_ct_helper_hash)
return -ENOENT;
if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
return -EINVAL;
mutex_lock(&nf_ct_helper_mutex);
for (i = 0; i < nf_ct_helper_hsize; i++) {
hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) {
if (!strcmp(cur->name, me->name) &&
(cur->tuple.src.l3num == NFPROTO_UNSPEC ||
cur->tuple.src.l3num == me->tuple.src.l3num) &&
cur->tuple.dst.protonum == me->tuple.dst.protonum) {
ret = -EEXIST;
goto out;
}
}
}
/* avoid unpredictable behaviour for auto_assign_helper */
if (!(me->flags & NF_CT_HELPER_F_USERSPACE)) {
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
&mask)) {
ret = -EEXIST;
goto out;
}
}
}
refcount_set(&me->refcnt, 1);
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
nf_ct_helper_count++;
out:
mutex_unlock(&nf_ct_helper_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data)
{
struct nf_conn_help *help = nfct_help(exp->master);
const struct nf_conntrack_helper *me = data;
const struct nf_conntrack_helper *this;
if (exp->helper == me)
return true;
this = rcu_dereference_protected(help->helper,
lockdep_is_held(&nf_conntrack_expect_lock));
return this == me;
}
void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
{
mutex_lock(&nf_ct_helper_mutex);
hlist_del_rcu(&me->hnode);
nf_ct_helper_count--;
mutex_unlock(&nf_ct_helper_mutex);
	/* Make sure nothing is still using the helper unless it's a
	 * connection in the hash.
	 */
synchronize_rcu();
nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
nf_ct_iterate_destroy(unhelp, me);
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
void nf_ct_helper_init(struct nf_conntrack_helper *helper,
u16 l3num, u16 protonum, const char *name,
u16 default_port, u16 spec_port, u32 id,
const struct nf_conntrack_expect_policy *exp_pol,
u32 expect_class_max,
int (*help)(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo),
int (*from_nlattr)(struct nlattr *attr,
struct nf_conn *ct),
struct module *module)
{
helper->tuple.src.l3num = l3num;
helper->tuple.dst.protonum = protonum;
helper->tuple.src.u.all = htons(spec_port);
helper->expect_policy = exp_pol;
helper->expect_class_max = expect_class_max;
helper->help = help;
helper->from_nlattr = from_nlattr;
helper->me = module;
snprintf(helper->nat_mod_name, sizeof(helper->nat_mod_name),
NF_NAT_HELPER_PREFIX "%s", name);
if (spec_port == default_port)
snprintf(helper->name, sizeof(helper->name), "%s", name);
else
snprintf(helper->name, sizeof(helper->name), "%s-%u", name, id);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_init);
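/* Illustrative sketch (hypothetical helper module, placeholder names):
 * a module tracking "foo" over TCP with a configurable port array would
 * typically initialize one helper per port,
 *
 *	for (i = 0; i < ports_c; i++)
 *		nf_ct_helper_init(&foo_helpers[i], AF_INET, IPPROTO_TCP,
 *				  "foo", FOO_DEFAULT_PORT, ports[i], i,
 *				  &foo_exp_policy, 0, foo_help, NULL,
 *				  THIS_MODULE);
 *
 * followed by nf_conntrack_helpers_register(foo_helpers, ports_c).
 */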
int nf_conntrack_helpers_register(struct nf_conntrack_helper *helper,
unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = nf_conntrack_helper_register(&helper[i]);
if (err < 0)
goto err;
}
return err;
err:
if (i > 0)
nf_conntrack_helpers_unregister(helper, i);
return err;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helpers_register);
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *helper,
unsigned int n)
{
while (n-- > 0)
nf_conntrack_helper_unregister(&helper[n]);
}
EXPORT_SYMBOL_GPL(nf_conntrack_helpers_unregister);
void nf_nat_helper_register(struct nf_conntrack_nat_helper *nat)
{
mutex_lock(&nf_ct_nat_helpers_mutex);
list_add_rcu(&nat->list, &nf_ct_nat_helpers);
mutex_unlock(&nf_ct_nat_helpers_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_helper_register);
void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat)
{
mutex_lock(&nf_ct_nat_helpers_mutex);
list_del_rcu(&nat->list);
mutex_unlock(&nf_ct_nat_helpers_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
int nf_conntrack_helper_init(void)
{
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
nf_ct_helper_hash =
nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
if (!nf_ct_helper_hash)
return -ENOMEM;
INIT_LIST_HEAD(&nf_ct_nat_helpers);
return 0;
}
void nf_conntrack_helper_fini(void)
{
kvfree(nf_ct_helper_hash);
nf_ct_helper_hash = NULL;
}
| linux-master | net/netfilter/nf_conntrack_helper.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
[UDP_CT_REPLIED] = 120*HZ,
};
static unsigned int *udp_get_timeouts(struct net *net)
{
return nf_udp_pernet(net)->timeouts;
}
static void udp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_UDP, "%s", msg);
}
static bool udp_error(struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
unsigned int udplen = skb->len - dataoff;
const struct udphdr *hdr;
struct udphdr _hdr;
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (!hdr) {
udp_error_log(skb, state, "short packet");
return true;
}
/* Truncated/malformed packets */
if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
udp_error_log(skb, state, "truncated/malformed packet");
return true;
}
/* Packet with no checksum */
if (!hdr->check)
return false;
/* Checksum invalid? Ignore.
* We skip checking packets on the outgoing path
* because the checksum is assumed to be correct.
* FIXME: Source route IP option packets --RR */
if (state->hook == NF_INET_PRE_ROUTING &&
state->net->ct.sysctl_checksum &&
nf_checksum(skb, state->hook, dataoff, IPPROTO_UDP, state->pf)) {
udp_error_log(skb, state, "bad checksum");
return true;
}
return false;
}
/* Returns verdict for packet, and may modify conntrack type */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
unsigned int *timeouts;
unsigned long status;
if (udp_error(skb, dataoff, state))
return -NF_ACCEPT;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
status = READ_ONCE(ct->status);
if ((status & IPS_CONFIRMED) == 0)
ct->proto.udp.stream_ts = 2 * HZ + jiffies;
/* If we've seen traffic both ways, this is some kind of UDP
* stream. Set Assured.
*/
if (status & IPS_SEEN_REPLY) {
unsigned long extra = timeouts[UDP_CT_UNREPLIED];
bool stream = false;
/* Still active after two seconds? Extend timeout. */
if (time_after(jiffies, ct->proto.udp.stream_ts)) {
extra = timeouts[UDP_CT_REPLIED];
stream = (status & IPS_ASSURED) == 0;
}
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
if (unlikely((status & IPS_NAT_CLASH)))
return NF_ACCEPT;
/* Also, more likely to be important, and not a probe */
if (stream && !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
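/* Timeout policy implemented above, in summary: unreplied flows, and
 * replied flows younger than two seconds, get the short UDP_CT_UNREPLIED
 * timeout (30s by default). Once both directions have been seen and the
 * flow is still active after two seconds, it is treated as a stream:
 * refreshed with UDP_CT_REPLIED (120s by default) and marked ASSURED,
 * unless it is an IPS_NAT_CLASH entry, which must keep expiring fast.
 */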
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
static void udplite_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_UDPLITE, "%s", msg);
}
static bool udplite_error(struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
unsigned int udplen = skb->len - dataoff;
const struct udphdr *hdr;
struct udphdr _hdr;
unsigned int cscov;
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (!hdr) {
udplite_error_log(skb, state, "short packet");
return true;
}
cscov = ntohs(hdr->len);
if (cscov == 0) {
cscov = udplen;
} else if (cscov < sizeof(*hdr) || cscov > udplen) {
udplite_error_log(skb, state, "invalid checksum coverage");
return true;
}
/* UDPLITE mandates checksums */
if (!hdr->check) {
udplite_error_log(skb, state, "checksum missing");
return true;
}
/* Checksum invalid? Ignore. */
if (state->hook == NF_INET_PRE_ROUTING &&
state->net->ct.sysctl_checksum &&
nf_checksum_partial(skb, state->hook, dataoff, cscov, IPPROTO_UDP,
state->pf)) {
udplite_error_log(skb, state, "bad checksum");
return true;
}
return false;
}
/* Returns verdict for packet, and may modify conntrack type */
int nf_conntrack_udplite_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
unsigned int *timeouts;
if (udplite_error(skb, dataoff, state))
return -NF_ACCEPT;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_REPLIED]);
if (unlikely((ct->status & IPS_NAT_CLASH)))
return NF_ACCEPT;
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct nf_udp_net *un = nf_udp_pernet(net);
if (!timeouts)
timeouts = un->timeouts;
/* set default timeouts for UDP. */
timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
timeouts[UDP_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
timeouts[UDP_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
}
return 0;
}
static int
udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
htonl(timeouts[UDP_CT_REPLIED] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
[CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
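/* The CTA_TIMEOUT_UDP_* netlink attributes carry timeouts in seconds;
 * the conversion helpers above translate to and from jiffies
 * (ntohl(...) * HZ on the way in, ... / HZ on the way out).
 */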
void nf_conntrack_udp_init_net(struct net *net)
{
struct nf_udp_net *un = nf_udp_pernet(net);
int i;
for (i = 0; i < UDP_CT_MAX; i++)
un->timeouts[i] = udp_timeouts[i];
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
un->offload_timeout = 30 * HZ;
#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
{
.l4proto = IPPROTO_UDP,
.allow_clash = true,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDP_MAX,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
{
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDP_MAX,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
#endif
| linux-master | net/netfilter/nf_conntrack_proto_udp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter. This is separated from,
but required by, the NAT layer; it can also be used by an iptables
extension. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2005-2012 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>
#include "nf_internals.h"
__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
struct conntrack_gc_work {
struct delayed_work dwork;
u32 next_bucket;
u32 avg_timeout;
u32 count;
u32 start_time;
bool exiting;
bool early_drop;
};
static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
/* serialize hash resizes and nf_ct_iterate_cleanup */
static DEFINE_MUTEX(nf_conntrack_mutex);
#define GC_SCAN_INTERVAL_MAX (60ul * HZ)
#define GC_SCAN_INTERVAL_MIN (1ul * HZ)
/* clamp timeouts to this value (TCP unacked) */
#define GC_SCAN_INTERVAL_CLAMP (300ul * HZ)
/* Initial bias pretending we have 100 entries at the upper bound so we don't
 * wake up often just because we have three entries with a 1s timeout while
 * still allowing non-idle machines to wake up more often when needed.
 */
#define GC_SCAN_INITIAL_COUNT 100
#define GC_SCAN_INTERVAL_INIT GC_SCAN_INTERVAL_MAX
#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
#define GC_SCAN_EXPIRED_MAX (64000u / HZ)
#define MIN_CHAINLEN 50u
#define MAX_CHAINLEN (80u - MIN_CHAINLEN)
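/* Worked out: with MIN_CHAINLEN 50 and MAX_CHAINLEN (80u - 50u) == 30,
 * the insertion paths compute max_chainlen = 50 + prandom[0..29], so a
 * hash chain is capped somewhere between 50 and 79 entries. The random
 * component keeps the exact cutoff unpredictable to an attacker trying
 * to engineer hash-chain flooding.
 */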
static struct conntrack_gc_work conntrack_gc_work;
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
/* 1) Acquire the lock */
spin_lock(lock);
/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
* It pairs with the smp_store_release() in nf_conntrack_all_unlock()
*/
if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
return;
/* fast path failed, unlock */
spin_unlock(lock);
/* Slow path 1) get global lock */
spin_lock(&nf_conntrack_locks_all_lock);
/* Slow path 2) get the lock we want */
spin_lock(lock);
/* Slow path 3) release the global lock */
spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS;
spin_unlock(&nf_conntrack_locks[h1]);
if (h1 != h2)
spin_unlock(&nf_conntrack_locks[h2]);
}
/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
unsigned int h2, unsigned int sequence)
{
h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS;
if (h1 <= h2) {
nf_conntrack_lock(&nf_conntrack_locks[h1]);
if (h1 != h2)
spin_lock_nested(&nf_conntrack_locks[h2],
SINGLE_DEPTH_NESTING);
} else {
nf_conntrack_lock(&nf_conntrack_locks[h2]);
spin_lock_nested(&nf_conntrack_locks[h1],
SINGLE_DEPTH_NESTING);
}
if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
nf_conntrack_double_unlock(h1, h2);
return true;
}
return false;
}
static void nf_conntrack_all_lock(void)
__acquires(&nf_conntrack_locks_all_lock)
{
int i;
spin_lock(&nf_conntrack_locks_all_lock);
	/* For nf_conntrack_locks_all, only the latest time when another
	 * CPU will see an update is controlled by the "release" of the
	 * spin_lock below.
	 * The earliest time is not controlled, and thus KCSAN could detect
	 * a race when nf_conntrack_lock() reads the variable.
	 * WRITE_ONCE() is used to ensure the compiler will not
	 * optimize the write.
	 */
WRITE_ONCE(nf_conntrack_locks_all, true);
for (i = 0; i < CONNTRACK_LOCKS; i++) {
spin_lock(&nf_conntrack_locks[i]);
/* This spin_unlock provides the "release" to ensure that
* nf_conntrack_locks_all==true is visible to everyone that
* acquired spin_lock(&nf_conntrack_locks[]).
*/
spin_unlock(&nf_conntrack_locks[i]);
}
}
static void nf_conntrack_all_unlock(void)
__releases(&nf_conntrack_locks_all_lock)
{
/* All prior stores must be complete before we clear
* 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
* might observe the false value but not the entire
* critical section.
* It pairs with the smp_load_acquire() in nf_conntrack_lock()
*/
smp_store_release(&nf_conntrack_locks_all, false);
spin_unlock(&nf_conntrack_locks_all_lock);
}
unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static siphash_aligned_key_t nf_conntrack_hash_rnd;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
const struct net *net)
{
siphash_key_t key;
get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
key = nf_conntrack_hash_rnd;
key.key[0] ^= zoneid;
key.key[1] ^= net_hash_mix(net);
return siphash((void *)tuple,
offsetofend(struct nf_conntrack_tuple, dst.__nfct_hash_offsetend),
&key);
}
static u32 scale_hash(u32 hash)
{
return reciprocal_scale(hash, nf_conntrack_htable_size);
}
static u32 __hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
unsigned int size)
{
return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
}
static u32 hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
unsigned int zoneid)
{
return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
}
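/* reciprocal_scale() maps the 32-bit siphash value uniformly onto
 * [0, size) with a multiply-and-shift ((u64)hash * size >> 32),
 * avoiding a division/modulo on the packet path.
 */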
static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;
/* Actually only need first 4 bytes to get ports. */
inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
if (!inet_hdr)
return false;
tuple->src.u.udp.port = inet_hdr->sport;
tuple->dst.u.udp.port = inet_hdr->dport;
return true;
}
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,
u_int16_t l3num,
u_int8_t protonum,
struct net *net,
struct nf_conntrack_tuple *tuple)
{
unsigned int size;
const __be32 *ap;
__be32 _addrs[8];
memset(tuple, 0, sizeof(*tuple));
tuple->src.l3num = l3num;
switch (l3num) {
case NFPROTO_IPV4:
nhoff += offsetof(struct iphdr, saddr);
size = 2 * sizeof(__be32);
break;
case NFPROTO_IPV6:
nhoff += offsetof(struct ipv6hdr, saddr);
size = sizeof(_addrs);
break;
default:
return true;
}
ap = skb_header_pointer(skb, nhoff, size, _addrs);
if (!ap)
return false;
switch (l3num) {
case NFPROTO_IPV4:
tuple->src.u3.ip = ap[0];
tuple->dst.u3.ip = ap[1];
break;
case NFPROTO_IPV6:
memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
break;
}
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
switch (protonum) {
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_ICMP:
return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_TCP:
case IPPROTO_UDP:
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
case IPPROTO_DCCP:
#endif
		/* all of the above share the port-pair tuple layout */
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
default:
break;
}
return true;
}
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
u_int8_t *protonum)
{
int dataoff = -1;
const struct iphdr *iph;
struct iphdr _iph;
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
if (!iph)
return -1;
/* Conntrack defragments packets, we might still see fragments
* inside ICMP packets though.
*/
if (iph->frag_off & htons(IP_OFFSET))
return -1;
dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
/* Check bogus IP headers */
if (dataoff > skb->len) {
pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
nhoff, iph->ihl << 2, skb->len);
return -1;
}
return dataoff;
}
#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
u8 *protonum)
{
int protoff = -1;
unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
__be16 frag_off;
u8 nexthdr;
if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
&nexthdr, sizeof(nexthdr)) != 0) {
pr_debug("can't get nexthdr\n");
return -1;
}
protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
	/*
	 * (protoff == skb->len) means the packet has no data, just
	 * the IPv6 header and possibly extension headers, but it is
	 * tracked anyway
	 */
if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
pr_debug("can't find proto in pkt\n");
return -1;
}
*protonum = nexthdr;
return protoff;
}
#endif
static int get_l4proto(const struct sk_buff *skb,
unsigned int nhoff, u8 pf, u8 *l4num)
{
switch (pf) {
case NFPROTO_IPV4:
return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
default:
*l4num = 0;
break;
}
return -1;
}
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num,
struct net *net, struct nf_conntrack_tuple *tuple)
{
u8 protonum;
int protoff;
protoff = get_l4proto(skb, nhoff, l3num, &protonum);
if (protoff <= 0)
return false;
return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig)
{
memset(inverse, 0, sizeof(*inverse));
inverse->src.l3num = orig->src.l3num;
switch (orig->src.l3num) {
case NFPROTO_IPV4:
inverse->src.u3.ip = orig->dst.u3.ip;
inverse->dst.u3.ip = orig->src.u3.ip;
break;
case NFPROTO_IPV6:
inverse->src.u3.in6 = orig->dst.u3.in6;
inverse->dst.u3.in6 = orig->src.u3.in6;
break;
default:
break;
}
inverse->dst.dir = !orig->dst.dir;
inverse->dst.protonum = orig->dst.protonum;
switch (orig->dst.protonum) {
case IPPROTO_ICMP:
return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
}
inverse->src.u.all = orig->dst.u.all;
inverse->dst.u.all = orig->src.u.all;
return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * Intentionally doesn't re-use any of the seeds used for hash
 * table location; we assume the id gets exposed to userspace.
*
* Following nf_conn items do not change throughout lifetime
* of the nf_conn:
*
* 1. nf_conn address
* 2. nf_conn->master address (normally NULL)
* 3. the associated net namespace
* 4. the original direction tuple
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
static siphash_aligned_key_t ct_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
a = (unsigned long)ct;
b = (unsigned long)ct->master;
c = (unsigned long)nf_ct_net(ct);
d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
&ct_id_seed);
#ifdef CONFIG_64BIT
return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);
static void
clean_from_lists(struct nf_conn *ct)
{
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
/* Destroy all pending expectations */
nf_ct_remove_expectations(ct);
}
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
/* Released via nf_ct_destroy() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags)
{
struct nf_conn *tmpl, *p;
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
if (!tmpl)
return NULL;
p = tmpl;
tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
if (tmpl != p) {
tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
}
} else {
tmpl = kzalloc(sizeof(*tmpl), flags);
if (!tmpl)
return NULL;
}
tmpl->status = IPS_TEMPLATE;
write_pnet(&tmpl->ct_net, net);
nf_ct_zone_add(tmpl, zone);
refcount_set(&tmpl->ct_general.use, 1);
return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
kfree(tmpl->ext);
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
kfree((char *)tmpl - tmpl->proto.tmpl_padto);
else
kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
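/* Background for the alignment games above: skb->_nfct stores the
 * ctinfo bits in the low bits of the nf_conn pointer (NFCT_INFOMASK),
 * so a template must be aligned beyond that mask. When kzalloc()
 * returns an insufficiently aligned pointer, the template is shifted
 * up and proto.tmpl_padto records the shift so that nf_ct_tmpl_free()
 * can hand the original pointer back to kfree().
 */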
static void destroy_gre_conntrack(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
struct nf_conn *master = ct->master;
if (master)
nf_ct_gre_keymap_destroy(master);
#endif
}
void nf_ct_destroy(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
WARN_ON(refcount_read(&nfct->use) != 0);
if (unlikely(nf_ct_is_template(ct))) {
nf_ct_tmpl_free(ct);
return;
}
if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
destroy_gre_conntrack(ct);
	/* Expectations will have been removed in clean_from_lists,
	 * except that TFTP can create an expectation on the first packet,
	 * before the connection is in the list, so we need to clean up
	 * here, too.
	 */
nf_ct_remove_expectations(ct);
if (ct->master)
nf_ct_put(ct->master);
nf_conntrack_free(ct);
}
EXPORT_SYMBOL(nf_ct_destroy);
static void __nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
unsigned int sequence;
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
clean_from_lists(ct);
nf_conntrack_double_unlock(hash, reply_hash);
}
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
nf_ct_helper_destroy(ct);
local_bh_disable();
__nf_ct_delete_from_lists(ct);
local_bh_enable();
}
static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct));
spin_lock(&cnet->ecache.dying_lock);
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&cnet->ecache.dying_list);
spin_unlock(&cnet->ecache.dying_lock);
#endif
}
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
struct nf_conn_tstamp *tstamp;
struct net *net;
if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
return false;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
if (timeout < 0)
tstamp->stop -= jiffies_to_nsecs(-timeout);
}
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
/* destroy event was not delivered. nf_ct_put will
* be done by event cache worker on redelivery.
*/
nf_ct_helper_destroy(ct);
local_bh_disable();
__nf_ct_delete_from_lists(ct);
nf_ct_add_to_ecache_list(ct);
local_bh_enable();
nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL);
return false;
}
net = nf_ct_net(ct);
if (nf_conntrack_ecache_dwork_pending(net))
nf_conntrack_ecache_work(net, NFCT_ECACHE_DESTROY_SENT);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone,
const struct net *net)
{
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	/* A conntrack can be recreated with an equal tuple,
	 * so we need to check that the conntrack is confirmed
	 */
return nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
nf_ct_is_confirmed(ct) &&
net_eq(net, nf_ct_net(ct));
}
static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
&ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}
/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
if (!refcount_inc_not_zero(&ct->ct_general.use))
return;
/* load ->status after refcount increase */
smp_acquire__after_ctrl_dep();
if (nf_ct_should_gc(ct))
nf_ct_kill(ct);
nf_ct_put(ct);
}
/*
 * Warning:
 * - Caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
*/
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
unsigned int bucket, hsize;
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
bucket = reciprocal_scale(hash, hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
struct nf_conn *ct;
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
nf_ct_gc_expired(ct);
continue;
}
if (nf_ct_key_equal(h, tuple, zone, net))
return h;
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(n) != bucket) {
NF_CT_STAT_INC_ATOMIC(net, search_restart);
goto begin;
}
return NULL;
}
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
h = ____nf_conntrack_find(net, zone, tuple, hash);
if (h) {
/* We have a candidate that matches the tuple we're interested
* in, try to obtain a reference and re-check tuple
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
/* re-check key after refcount */
smp_acquire__after_ctrl_dep();
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
return h;
/* TYPESAFE_BY_RCU recycled the candidate */
nf_ct_put(ct);
}
h = NULL;
}
return h;
}
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
struct nf_conntrack_tuple_hash *thash;
rcu_read_lock();
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, zone_id, net));
if (thash)
goto out_unlock;
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (rid != zone_id)
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, rid, net));
out_unlock:
rcu_read_unlock();
return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
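/* Note the two-pass lookup above: with directional zones the ORIGINAL
 * and REPLY directions may hash under different zone ids, so if the
 * first lookup misses and the reply-direction id differs, the table is
 * probed a second time under that id.
 */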
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
unsigned int hash,
unsigned int reply_hash)
{
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&nf_conntrack_hash[hash]);
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[reply_hash]);
}
static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext)
{
/* if ext->gen_id is not equal to nf_conntrack_ext_genid, some extensions
* may contain stale pointers to e.g. helper that has been removed.
*
* The helper can't clear this because the nf_conn object isn't in
* any hash and synchronize_rcu() isn't enough because associated skb
* might sit in a queue.
*/
return !ext || ext->gen_id == atomic_read(&nf_conntrack_ext_genid);
}
static bool nf_ct_ext_valid_post(struct nf_ct_ext *ext)
{
if (!ext)
return true;
if (ext->gen_id != atomic_read(&nf_conntrack_ext_genid))
return false;
/* inserted into conntrack table, nf_ct_iterate_cleanup()
* will find it. Disable nf_ct_ext_find() id check.
*/
WRITE_ONCE(ext->gen_id, 0);
return true;
}
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int max_chainlen;
unsigned int chainlen = 0;
unsigned int sequence;
int err = -EEXIST;
zone = nf_ct_zone(ct);
if (!nf_ct_ext_valid_pre(ct->ext))
return -EAGAIN;
local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
chainlen = 0;
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
/* If genid has changed, we can't insert anymore because ct
* extensions could have stale pointers and nf_ct_iterate_destroy
* might have completed its table scan already.
*
* Increment of the ext genid right after this check is fine:
* nf_ct_iterate_destroy blocks until locks are released.
*/
if (!nf_ct_ext_valid_post(ct->ext)) {
err = -EAGAIN;
goto out;
}
smp_wmb();
/* The caller holds a reference to this object */
refcount_set(&ct->ct_general.use, 2);
__nf_conntrack_hash_insert(ct, hash, reply_hash);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert);
local_bh_enable();
return 0;
chaintoolong:
NF_CT_STAT_INC(net, chaintoolong);
err = -ENOSPC;
out:
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return err;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
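/* The refcount_set(..., 2) above accounts for the two users that exist
 * once insertion succeeds: the hash table itself and the reference the
 * caller already holds.
 */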
void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
unsigned int bytes)
{
struct nf_conn_acct *acct;
acct = nf_conn_acct_find(ct);
if (acct) {
struct nf_conn_counter *counter = acct->counter;
atomic64_add(packets, &counter[dir].packets);
atomic64_add(bytes, &counter[dir].bytes);
}
}
EXPORT_SYMBOL_GPL(nf_ct_acct_add);
static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
const struct nf_conn *loser_ct)
{
struct nf_conn_acct *acct;
acct = nf_conn_acct_find(loser_ct);
if (acct) {
struct nf_conn_counter *counter = acct->counter;
unsigned int bytes;
/* u32 should be fine since we must have seen one packet. */
bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
}
}
static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
{
struct nf_conn_tstamp *tstamp;
refcount_inc(&ct->ct_general.use);
/* set conntrack timestamp, if enabled. */
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
tstamp->start = ktime_get_real_ns();
}
/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
enum ip_conntrack_info ctinfo;
struct nf_conn *loser_ct;
loser_ct = nf_ct_get(skb, &ctinfo);
if (nf_ct_is_dying(ct))
return NF_DROP;
if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
nf_ct_match(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
nf_conntrack_get(&ct->ct_general);
nf_ct_acct_merge(ct, ctinfo, loser_ct);
nf_ct_put(loser_ct);
nf_ct_set(skb, ct, ctinfo);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
return NF_DROP;
}
/**
* nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
*
* @skb: skb that causes the collision
* @repl_idx: hash slot for reply direction
*
* Called when origin or reply direction had a clash.
 * The skb can be handled without packet drop provided the reply direction
 * is unique or the existing entry has an identical tuple in both
 * directions.
*
* Caller must hold conntrack table locks to prevent concurrent updates.
*
* Returns NF_DROP if the clash could not be handled.
*/
static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
{
struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct net *net;
zone = nf_ct_zone(loser_ct);
net = nf_ct_net(loser_ct);
/* Reply direction must never result in a clash, unless both origin
* and reply tuples are identical.
*/
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
if (nf_ct_key_equal(h,
&loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
return __nf_ct_resolve_clash(skb, h);
}
/* We want the clashing entry to go away real soon: 1 second timeout. */
WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
/* IPS_NAT_CLASH removes the entry automatically on the first
* reply. Also prevents UDP tracker from moving the entry to
* ASSURED state, i.e. the entry can always be evicted under
* pressure.
*/
loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
__nf_conntrack_insert_prepare(loser_ct);
/* fake add for ORIGINAL dir: we want lookups to only find the entry
* already in the table. This also hides the clashing entry from
* ctnetlink iteration, i.e. conntrack -L won't show them.
*/
hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[repl_idx]);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
/**
* nf_ct_resolve_clash - attempt to handle clash without packet drop
*
* @skb: skb that causes the clash
* @h: tuplehash of the clashing entry already in table
* @reply_hash: hash slot for reply direction
*
* A conntrack entry can be inserted to the connection tracking table
* if there is no existing entry with an identical tuple.
*
 * If there is one, @skb (and the associated, unconfirmed conntrack) has
* to be dropped. In case @skb is retransmitted, next conntrack lookup
* will find the already-existing entry.
*
* The major problem with such packet drop is the extra delay added by
* the packet loss -- it will take some time for a retransmit to occur
* (or the sender to time out when waiting for a reply).
*
* This function attempts to handle the situation without packet drop.
*
* If @skb has no NAT transformation or if the colliding entries are
* exactly the same, only the to-be-confirmed conntrack entry is discarded
* and @skb is associated with the conntrack entry already in the table.
*
* Failing that, the new, unconfirmed conntrack is still added to the table
* provided that the collision only occurs in the ORIGINAL direction.
* The new entry will be added only in the non-clashing REPLY direction,
* so packets in the ORIGINAL direction will continue to match the existing
* entry. The new entry will also have a fixed timeout so it expires --
* due to the collision, it will only see reply traffic.
*
* Returns NF_DROP if the clash could not be resolved.
*/
static __cold noinline int
nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
u32 reply_hash)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
const struct nf_conntrack_l4proto *l4proto;
enum ip_conntrack_info ctinfo;
struct nf_conn *loser_ct;
struct net *net;
int ret;
loser_ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(loser_ct);
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->allow_clash)
goto drop;
ret = __nf_ct_resolve_clash(skb, h);
if (ret == NF_ACCEPT)
return ret;
ret = nf_ct_resolve_clash_harder(skb, reply_hash);
if (ret == NF_ACCEPT)
return ret;
drop:
NF_CT_STAT_INC(net, drop);
NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
}
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
unsigned int chainlen = 0, sequence, max_chainlen;
const struct nf_conntrack_zone *zone;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
int ret = NF_DROP;
ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(ct);
	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in the other direction. The actual packet
	   that created the connection will be IP_CT_NEW, or IP_CT_RELATED
	   for an expected connection. */
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
zone = nf_ct_zone(ct);
local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
/* reuse the hash saved before */
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = scale_hash(hash);
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
/* We're not in hash table, and we refuse to set up related
* connections for unconfirmed conns. But packet copies and
* REJECT will give spurious warnings here.
*/
	/* Another skb with the same unconfirmed conntrack may
	 * win the race. This may happen for bridge (br_flood) or
	 * for broadcast/multicast packets that are skb_clone()d with
	 * an unconfirmed conntrack.
	 */
if (unlikely(nf_ct_is_confirmed(ct))) {
WARN_ON_ONCE(1);
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return NF_DROP;
}
if (!nf_ct_ext_valid_pre(ct->ext)) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
	/* We have to check the DYING flag after unlink to prevent
	 * a race against nf_ct_get_next_corpse() possibly called from
	 * user context, else we insert an already 'dead' entry into the
	 * hash, blocking further use of that particular connection -JM.
	 */
ct->status |= IPS_CONFIRMED;
if (unlikely(nf_ct_is_dying(ct))) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost the race. */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
chainlen = 0;
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen) {
chaintoolong:
NF_CT_STAT_INC(net, chaintoolong);
NF_CT_STAT_INC(net, insert_failed);
ret = NF_DROP;
goto dying;
}
}
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
__nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
* stores are visible.
*/
__nf_conntrack_hash_insert(ct, hash, reply_hash);
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
	/* The ext area is still valid (the rcu read lock is held), but it
	 * will go out of scope soon; if the genid check fails we need to
	 * remove this conntrack again.
	 */
if (!nf_ct_ext_valid_post(ct->ext)) {
nf_ct_kill(ct);
NF_CT_STAT_INC_ATOMIC(net, drop);
return NF_DROP;
}
help = nfct_help(ct);
if (help && help->helper)
nf_conntrack_event_cache(IPCT_HELPER, ct);
nf_conntrack_event_cache(master_ct(ct) ?
IPCT_RELATED : IPCT_NEW, ct);
return NF_ACCEPT;
out:
ret = nf_ct_resolve_clash(skb, h, reply_hash);
dying:
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
/* Returns true if a connection corresponds to the tuple (required
for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
struct net *net = nf_ct_net(ignored_conntrack);
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
unsigned int hash, hsize;
struct hlist_nulls_node *n;
struct nf_conn *ct;
zone = nf_ct_zone(ignored_conntrack);
rcu_read_lock();
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct == ignored_conntrack)
continue;
if (nf_ct_is_expired(ct)) {
nf_ct_gc_expired(ct);
continue;
}
if (nf_ct_key_equal(h, tuple, zone, net)) {
/* Tuple is taken already, so caller will need to find
* a new source port to use.
*
* Only exception:
* If the *original tuples* are identical, then both
* conntracks refer to the same flow.
* This is a rare situation, it can occur e.g. when
* more than one UDP packet is sent from same socket
* in different threads.
*
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
}
}
if (get_nulls_value(n) != hash) {
NF_CT_STAT_INC_ATOMIC(net, search_restart);
goto begin;
}
rcu_read_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
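/* Number of consecutive hash buckets early_drop() scans when the
* conntrack table is full before giving up.
*/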
#define NF_CT_EVICTION_RANGE 8
/* There's a small race here where we may free a just-assured
connection. Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
struct hlist_nulls_head *head)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int drops = 0;
struct nf_conn *tmp;
hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
continue;
}
if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
!net_eq(nf_ct_net(tmp), net) ||
nf_ct_is_dying(tmp))
continue;
if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
/* load ->ct_net and ->status after refcount increase */
smp_acquire__after_ctrl_dep();
/* kill only if still in same netns -- might have moved due to
* SLAB_TYPESAFE_BY_RCU rules.
*
* We steal the timer reference. If that fails timer has
* already fired or someone else deleted it. Just drop ref
* and move to next entry.
*/
if (net_eq(nf_ct_net(tmp), net) &&
nf_ct_is_confirmed(tmp) &&
nf_ct_delete(tmp, 0, 0))
drops++;
nf_ct_put(tmp);
}
return drops;
}
static noinline int early_drop(struct net *net, unsigned int hash)
{
unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
if (!i)
bucket = reciprocal_scale(hash, hsize);
else
bucket = (bucket + 1) % hsize;
drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
return true;
}
}
return false;
}
static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}
static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
u8 protonum = nf_ct_protonum(ct);
if (test_bit(IPS_OFFLOAD_BIT, &ct->status) && protonum != IPPROTO_UDP)
return false;
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
l4proto = nf_ct_l4proto_find(protonum);
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
return false;
}
static void gc_worker(struct work_struct *work)
{
unsigned int i, hashsz, nf_conntrack_max95 = 0;
u32 end_time, start_time = nfct_time_stamp;
struct conntrack_gc_work *gc_work;
unsigned int expired_count = 0;
unsigned long next_run;
s32 delta_time;
long count;
gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
i = gc_work->next_bucket;
if (gc_work->early_drop)
nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
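/* Early-drop threshold: entries become eligible for eviction once a
* netns holds at least 95% of nf_conntrack_max entries.
*/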
if (i == 0) {
gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
gc_work->count = GC_SCAN_INITIAL_COUNT;
gc_work->start_time = start_time;
}
next_run = gc_work->avg_timeout;
count = gc_work->count;
end_time = start_time + GC_SCAN_MAX_DURATION;
do {
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
struct nf_conn *tmp;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hashsz);
if (i >= hashsz) {
rcu_read_unlock();
break;
}
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
struct nf_conntrack_net *cnet;
struct net *net;
long expires;
tmp = nf_ct_tuplehash_to_ctrack(h);
if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
nf_ct_offload_timeout(tmp);
if (!nf_conntrack_max95)
continue;
}
if (expired_count > GC_SCAN_EXPIRED_MAX) {
rcu_read_unlock();
gc_work->next_bucket = i;
gc_work->avg_timeout = next_run;
gc_work->count = count;
delta_time = nfct_time_stamp - gc_work->start_time;
/* re-sched immediately if total cycle time is exceeded */
next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
goto early_exit;
}
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
expired_count++;
continue;
}
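/* Incremental mean update: next_run accumulates the average remaining
* (clamped) timeout of the entries scanned so far and is later used as
* the next scan interval.
*/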
expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
expires = (expires - (long)next_run) / ++count;
next_run += expires;
if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
continue;
net = nf_ct_net(tmp);
cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) < nf_conntrack_max95)
continue;
/* need to take reference to avoid possible races */
if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
/* load ->status after refcount increase */
smp_acquire__after_ctrl_dep();
if (gc_worker_skip_ct(tmp)) {
nf_ct_put(tmp);
continue;
}
if (gc_worker_can_early_drop(tmp)) {
nf_ct_kill(tmp);
expired_count++;
}
nf_ct_put(tmp);
}
/* could check get_nulls_value() here and restart if ct
* was moved to another chain. But given gc is best-effort
* we will just continue with next hash slot.
*/
rcu_read_unlock();
cond_resched();
i++;
delta_time = nfct_time_stamp - end_time;
if (delta_time > 0 && i < hashsz) {
gc_work->avg_timeout = next_run;
gc_work->count = count;
gc_work->next_bucket = i;
next_run = 0;
goto early_exit;
}
} while (i < hashsz);
gc_work->next_bucket = 0;
next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
if (next_run > (unsigned long)delta_time)
next_run -= delta_time;
else
next_run = 1;
early_exit:
if (gc_work->exiting)
return;
if (next_run)
gc_work->early_drop = false;
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
gc_work->exiting = false;
}
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp, u32 hash)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
unsigned int ct_count;
struct nf_conn *ct;
/* We don't want any race condition at early drop stage */
ct_count = atomic_inc_return(&cnet->count);
if (nf_conntrack_max && unlikely(ct_count > nf_conntrack_max)) {
if (!early_drop(net, hash)) {
if (!conntrack_gc_work.early_drop)
conntrack_gc_work.early_drop = true;
atomic_dec(&cnet->count);
net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
return ERR_PTR(-ENOMEM);
}
}
/*
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_TYPESAFE_BY_RCU.
*/
ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
if (ct == NULL)
goto out;
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
memset_after(ct, 0, __nfct_init_offset);
nf_ct_zone_add(ct, zone);
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
refcount_set(&ct->ct_general.use, 0);
return ct;
out:
atomic_dec(&cnet->count);
return ERR_PTR(-ENOMEM);
}
struct nf_conn *nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
{
return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_net *cnet;
/* A freed object has refcnt == 0, that's
* the golden rule for SLAB_TYPESAFE_BY_RCU
*/
WARN_ON(refcount_read(&ct->ct_general.use) != 0);
if (ct->status & IPS_SRC_NAT_DONE) {
const struct nf_nat_hook *nat_hook;
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
if (nat_hook)
nat_hook->remove_nat_bysrc(ct);
rcu_read_unlock();
}
kfree(ct->ext);
kmem_cache_free(nf_conntrack_cachep, ct);
cnet = nf_ct_pernet(net);
smp_mb__before_atomic();
atomic_dec(&cnet->count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
failed due to stress. Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
{
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache;
#endif
struct nf_conntrack_expect *exp = NULL;
const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
struct nf_conntrack_net *cnet;
if (!nf_ct_invert_tuple(&repl_tuple, tuple))
return NULL;
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
if (!nf_ct_add_synproxy(ct, tmpl)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
GFP_ATOMIC);
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
if ((ecache || net->ct.sysctl_events) &&
!nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
ecache ? ecache->expmask : 0,
GFP_ATOMIC)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
#endif
cnet = nf_ct_pernet(net);
if (cnet->expect_count) {
spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl));
if (exp) {
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
ct->master = exp->master;
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
#ifdef CONFIG_NF_CONNTRACK_MARK
ct->mark = READ_ONCE(exp->master->mark);
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
#endif
NF_CT_STAT_INC(net, expect_new);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
if (!exp && tmpl)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
/* Another CPU might have obtained a pointer to this object before it was
* released. Because refcount is 0, refcount_inc_not_zero() will fail.
*
* After refcount_set(1) it will succeed; ensure that zeroing of
* ct->status and the correct ct->net pointer are visible; else another
* core might observe the CONFIRMED bit, which means the entry is valid
* and in the hash table, but it's not (anymore).
*/
smp_wmb();
/* Now it is going to be associated with an sk_buff, set refcount to 1. */
refcount_set(&ct->ct_general.use, 1);
if (exp) {
if (exp->expectfn)
exp->expectfn(ct, exp);
nf_ct_expect_put(exp);
}
return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int8_t protonum,
const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
u32 hash, zone_id, rid;
struct nf_conn *ct;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
&tuple))
return 0;
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
hash = hash_conntrack_raw(&tuple, zone_id, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
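/* Zones can differ per direction; if so, retry the lookup with a hash
* computed from the reply-direction zone id.
*/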
if (!h) {
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (zone_id != rid) {
u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
}
}
if (!h) {
h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
if (!h)
return 0;
if (IS_ERR(h))
return PTR_ERR(h);
}
ct = nf_ct_tuplehash_to_ctrack(h);
/* It exists; we have (non-exclusive) reference. */
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
ctinfo = IP_CT_ESTABLISHED_REPLY;
} else {
unsigned long status = READ_ONCE(ct->status);
/* Once we've had two way comms, always ESTABLISHED. */
if (likely(status & IPS_SEEN_REPLY))
ctinfo = IP_CT_ESTABLISHED;
else if (status & IPS_EXPECTED)
ctinfo = IP_CT_RELATED;
else
ctinfo = IP_CT_NEW;
}
nf_ct_set(skb, ct, ctinfo);
return 0;
}
/*
* icmp packets need special treatment to handle error messages that are
* related to a connection.
*
* Callers need to check if skb has a conntrack assigned when this
* helper returns; in such case skb belongs to an already known connection.
*/
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u8 protonum,
const struct nf_hook_state *state)
{
int ret;
if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
else
return NF_ACCEPT;
if (ret <= 0)
NF_CT_STAT_INC_ATOMIC(state->net, error);
return ret;
}
static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
enum ip_conntrack_info ctinfo)
{
const unsigned int *timeout = nf_ct_timeout_lookup(ct);
if (!timeout)
timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
switch (nf_ct_protonum(ct)) {
case IPPROTO_TCP:
return nf_conntrack_tcp_packet(ct, skb, dataoff,
ctinfo, state);
case IPPROTO_UDP:
return nf_conntrack_udp_packet(ct, skb, dataoff,
ctinfo, state);
case IPPROTO_ICMP:
return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
return nf_conntrack_udplite_packet(ct, skb, dataoff,
ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
return nf_conntrack_sctp_packet(ct, skb, dataoff,
ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
case IPPROTO_DCCP:
return nf_conntrack_dccp_packet(ct, skb, dataoff,
ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
return nf_conntrack_gre_packet(ct, skb, dataoff,
ctinfo, state);
#endif
}
return generic_packet(ct, skb, ctinfo);
}
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct, *tmpl;
u_int8_t protonum;
int dataoff, ret;
tmpl = nf_ct_get(skb, &ctinfo);
if (tmpl || ctinfo == IP_CT_UNTRACKED) {
/* Previously seen (loopback or untracked)? Ignore. */
if ((tmpl && !nf_ct_is_template(tmpl)) ||
ctinfo == IP_CT_UNTRACKED)
return NF_ACCEPT;
skb->_nfct = 0;
}
/* rcu_read_lock()ed by nf_hook_thresh */
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
protonum, state);
if (ret <= 0) {
ret = -ret;
goto out;
}
/* ICMP[v6] protocol trackers may assign one conntrack. */
if (skb->_nfct)
goto out;
}
repeat:
ret = resolve_normal_ct(tmpl, skb, dataoff,
protonum, state);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = NF_DROP;
goto out;
}
ct = nf_ct_get(skb, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
nf_ct_put(ct);
skb->_nfct = 0;
/* Special case: TCP tracker reports an attempt to reopen a
* closed/aborted connection. We have to go back and create a
* fresh conntrack.
*/
if (ret == -NF_REPEAT)
goto repeat;
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
if (ret == -NF_DROP)
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = -ret;
goto out;
}
if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
!test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
if (tmpl)
nf_ct_put(tmpl);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
/* Alter reply tuple (maybe alter helper). This is for NAT, and is
implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
const struct nf_conntrack_tuple *newreply)
{
struct nf_conn_help *help = nfct_help(ct);
/* Should be unconfirmed, so not in hash table yet */
WARN_ON(nf_ct_is_confirmed(ct));
nf_ct_dump_tuple(newreply);
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
if (ct->master || (help && !hlist_empty(&help->expectations)))
return;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
/* Refresh conntrack for this many jiffies and do accounting if do_acct is set */
void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
u32 extra_jiffies,
bool do_acct)
{
/* Only update if this is not a fixed timeout */
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
goto acct;
/* If not in hash table, timer will not be active yet */
if (nf_ct_is_confirmed(ct))
extra_jiffies += nfct_time_stamp;
if (READ_ONCE(ct->timeout) != extra_jiffies)
WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
if (do_acct)
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
bool nf_ct_kill_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb)
{
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>
/* Generic function for tcp/udp/sctp/dccp and alike. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
[CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
if (!tb[CTA_PROTO_SRC_PORT])
return -EINVAL;
t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
if (!tb[CTA_PROTO_DST_PORT])
return -EINVAL;
t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
unsigned int nf_ct_port_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
/* This ICMP is in reverse direction to the packet which caused it */
ct = nf_ct_get(skb, &ctinfo);
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
ctinfo = IP_CT_RELATED_REPLY;
else
ctinfo = IP_CT_RELATED;
/* Attach to new skbuff, and increment count */
nf_ct_set(nskb, ct, ctinfo);
nf_conntrack_get(skb_nfct(nskb));
}
static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
const struct nf_nat_hook *nat_hook;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
unsigned int status;
int dataoff;
u16 l3num;
u8 l4num;
l3num = nf_ct_l3num(ct);
dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
if (dataoff <= 0)
return -1;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
l4num, net, &tuple))
return -1;
if (ct->status & IPS_SRC_NAT) {
memcpy(tuple.src.u3.all,
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
sizeof(tuple.src.u3.all));
tuple.src.u.all =
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
}
if (ct->status & IPS_DST_NAT) {
memcpy(tuple.dst.u3.all,
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
sizeof(tuple.dst.u3.all));
tuple.dst.u.all =
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
}
h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
if (!h)
return 0;
/* Store the status bits of the clashing conntrack so we can re-do NAT
* mangling according to what has already been done to this packet.
*/
status = ct->status;
nf_ct_put(ct);
ct = nf_ct_tuplehash_to_ctrack(h);
nf_ct_set(skb, ct, ctinfo);
nat_hook = rcu_dereference(nf_nat_hook);
if (!nat_hook)
return 0;
if (status & IPS_SRC_NAT &&
nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
IP_CT_DIR_ORIGINAL) == NF_DROP)
return -1;
if (status & IPS_DST_NAT &&
nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
IP_CT_DIR_ORIGINAL) == NF_DROP)
return -1;
return 0;
}
/* This packet is coming from userspace via nf_queue, complete the packet
* processing after the helper invocation in nf_confirm().
*/
static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
const struct nf_conntrack_helper *helper;
const struct nf_conn_help *help;
int protoff;
help = nfct_help(ct);
if (!help)
return 0;
helper = rcu_dereference(help->helper);
if (!helper)
return 0;
if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
return 0;
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
protoff = skb_network_offset(skb) + ip_hdrlen(skb);
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6: {
__be16 frag_off;
u8 pnum;
pnum = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
&frag_off);
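/* Non-first IPv6 fragments carry no transport header to adjust. */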
if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
return 0;
break;
}
#endif
default:
return 0;
}
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return -1;
}
}
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
}
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
int err;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return 0;
if (!nf_ct_is_confirmed(ct)) {
err = __nf_conntrack_update(net, skb, ct, ctinfo);
if (err < 0)
return err;
ct = nf_ct_get(skb, &ctinfo);
}
return nf_confirm_cthelper(skb, ct, ctinfo);
}
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
const struct nf_conntrack_tuple *src_tuple;
const struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple srctuple;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
return true;
}
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
NFPROTO_IPV4, dev_net(skb->dev),
&srctuple))
return false;
hash = nf_conntrack_find_get(dev_net(skb->dev),
&nf_ct_zone_dflt,
&srctuple);
if (!hash)
return false;
ct = nf_ct_tuplehash_to_ctrack(hash);
src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
nf_ct_put(ct);
return true;
}
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data, unsigned int *bucket)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
spinlock_t *lockp;
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
if (hlist_nulls_empty(hslot))
continue;
lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
local_bh_disable();
nf_conntrack_lock(lockp);
hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
continue;
/* All nf_conn objects are added to the hash table twice, once
* for the original direction tuple, once for the reply tuple.
*
* Exception: In the IPS_NAT_CLASH case, only the reply
* tuple is added (the original tuple already existed for
* a different object).
*
* We only need to call the iterator once for each
* conntrack, so we just use the 'reply' direction
* tuple while iterating.
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter_data->net &&
!net_eq(iter_data->net, nf_ct_net(ct)))
continue;
if (iter(ct, iter_data->data))
goto found;
}
spin_unlock(lockp);
local_bh_enable();
cond_resched();
}
return NULL;
found:
refcount_inc(&ct->ct_general.use);
spin_unlock(lockp);
local_bh_enable();
return ct;
}
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data)
{
unsigned int bucket = 0;
struct nf_conn *ct;
might_sleep();
mutex_lock(&nf_conntrack_mutex);
while ((ct = get_next_corpse(iter, iter_data, &bucket)) != NULL) {
/* Time to push up daisies... */
nf_ct_delete(ct, iter_data->portid, iter_data->report);
nf_ct_put(ct);
cond_resched();
}
mutex_unlock(&nf_conntrack_mutex);
}
void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data)
{
struct net *net = iter_data->net;
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
might_sleep();
if (atomic_read(&cnet->count) == 0)
return;
nf_ct_iterate_cleanup(iter, iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
/**
* nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
* @iter: callback to invoke for each conntrack
* @data: data to pass to @iter
*
* Like nf_ct_iterate_cleanup, but first marks conntracks on the
* unconfirmed list as dying (so they will not be inserted into
* the main table).
*
* Can only be called in the module exit path.
*/
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
struct nf_ct_iter_data iter_data = {};
struct net *net;
down_read(&net_rwsem);
for_each_net(net) {
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) == 0)
continue;
nf_queue_nf_hook_drop(net);
}
up_read(&net_rwsem);
/* Need to wait for netns cleanup worker to finish, if it's
* running -- it might have deleted a net namespace from
* the global list, so hook drop above might not have
* affected all namespaces.
*/
net_ns_barrier();
/* An skb with an unconfirmed conntrack could have been reinjected just
* before we called nf_queue_nf_hook_drop().
*
* This makes sure it is inserted into the conntrack table.
*/
synchronize_net();
nf_ct_ext_bump_genid();
iter_data.data = data;
nf_ct_iterate_cleanup(iter, &iter_data);
/* Another cpu might be in an rcu read section with an
* rcu-protected pointer cleared in the iter callback
* or hidden via nf_ct_ext_bump_genid() above.
*
* Wait until those are done.
*/
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
}
void nf_conntrack_cleanup_start(void)
{
cleanup_nf_conntrack_bpf();
conntrack_gc_work.exiting = true;
}
void nf_conntrack_cleanup_end(void)
{
RCU_INIT_POINTER(nf_ct_hook, NULL);
cancel_delayed_work_sync(&conntrack_gc_work.dwork);
kvfree(nf_conntrack_hash);
nf_conntrack_proto_fini();
nf_conntrack_helper_fini();
nf_conntrack_expect_fini();
kmem_cache_destroy(nf_conntrack_cachep);
}
/*
* Mishearing the voices in his head, our hero wonders how he's
* supposed to kill the mall.
*/
void nf_conntrack_cleanup_net(struct net *net)
{
LIST_HEAD(single);
list_add(&net->exit_list, &single);
nf_conntrack_cleanup_net_list(&single);
}
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
struct nf_ct_iter_data iter_data = {};
struct net *net;
int busy;
/*
* This makes sure all current packets have passed through
* the netfilter framework. Roll on, two-stage module
* delete...
*/
synchronize_net();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
iter_data.net = net;
nf_ct_iterate_cleanup_net(kill_all, &iter_data);
if (atomic_read(&cnet->count) != 0)
busy = 1;
}
if (busy) {
schedule();
goto i_see_dead_people;
}
list_for_each_entry(net, net_exit_list, exit_list) {
nf_conntrack_ecache_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
}
}
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
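/* Round the slot count up so the table occupies whole pages. */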
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
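/* Seed each chain's nulls marker with its bucket index so lockless
* lookups can detect that an entry moved to another chain and restart.
*/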
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
INIT_HLIST_NULLS_HEAD(&hash[i], i);
return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_hash_resize(unsigned int hashsize)
{
int i, bucket;
unsigned int old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
if (!hashsize)
return -EINVAL;
hash = nf_ct_alloc_hashtable(&hashsize, 1);
if (!hash)
return -ENOMEM;
mutex_lock(&nf_conntrack_mutex);
old_size = nf_conntrack_htable_size;
if (old_size == hashsize) {
mutex_unlock(&nf_conntrack_mutex);
kvfree(hash);
return 0;
}
local_bh_disable();
nf_conntrack_all_lock();
write_seqcount_begin(&nf_conntrack_generation);
/* Lookups in the old hash might happen in parallel, which means we
* might get false negatives during connection lookup. New connections
* created because of a false negative won't make it into the hash
* though since that required taking the locks.
*/
for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
unsigned int zone_id;
h = hlist_nulls_entry(nf_conntrack_hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
bucket = __hash_conntrack(nf_ct_net(ct),
&h->tuple, zone_id, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
old_hash = nf_conntrack_hash;
nf_conntrack_hash = hash;
nf_conntrack_htable_size = hashsize;
write_seqcount_end(&nf_conntrack_generation);
nf_conntrack_all_unlock();
local_bh_enable();
mutex_unlock(&nf_conntrack_mutex);
synchronize_net();
kvfree(old_hash);
return 0;
}
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{
unsigned int hashsize;
int rc;
if (current->nsproxy->net_ns != &init_net)
return -EOPNOTSUPP;
/* On boot, we can set this without any fancy locking. */
if (!nf_conntrack_hash)
return param_set_uint(val, kp);
rc = kstrtouint(val, 0, &hashsize);
if (rc)
return rc;
return nf_conntrack_hash_resize(hashsize);
}
int nf_conntrack_init_start(void)
{
unsigned long nr_pages = totalram_pages();
int max_factor = 8;
int ret = -ENOMEM;
int i;
seqcount_spinlock_init(&nf_conntrack_generation,
&nf_conntrack_locks_all_lock);
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_conntrack_locks[i]);
if (!nf_conntrack_htable_size) {
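/* Heuristic: scale the default table size with available memory;
* the fixed caps and the 1024 floor below bound the result.
*/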
nf_conntrack_htable_size
= (((nr_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
if (BITS_PER_LONG >= 64 &&
nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
nf_conntrack_htable_size = 262144;
else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 65536;
if (nf_conntrack_htable_size < 1024)
nf_conntrack_htable_size = 1024;
/* Use a max. factor of one by default to keep the average
* hash chain length at 2 entries. Each entry has to be added
* twice (once for original direction, once for reply).
* When a table size is given we use the old value of 8 to
* avoid implicit reduction of the max entries setting.
*/
max_factor = 1;
}
nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
if (!nf_conntrack_hash)
return -ENOMEM;
nf_conntrack_max = max_factor * nf_conntrack_htable_size;
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
NFCT_INFOMASK + 1,
SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
if (!nf_conntrack_cachep)
goto err_cachep;
ret = nf_conntrack_expect_init();
if (ret < 0)
goto err_expect;
ret = nf_conntrack_helper_init();
if (ret < 0)
goto err_helper;
ret = nf_conntrack_proto_init();
if (ret < 0)
goto err_proto;
conntrack_gc_work_init(&conntrack_gc_work);
queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
ret = register_nf_conntrack_bpf();
if (ret < 0)
goto err_kfunc;
return 0;
err_kfunc:
cancel_delayed_work_sync(&conntrack_gc_work.dwork);
nf_conntrack_proto_fini();
err_proto:
nf_conntrack_helper_fini();
err_helper:
nf_conntrack_expect_fini();
err_expect:
kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
kvfree(nf_conntrack_hash);
return ret;
}
static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
{
struct nf_conn *ct = nf_ct_to_nf_conn(nfct);
switch (nf_ct_protonum(ct)) {
case IPPROTO_TCP:
nf_conntrack_tcp_set_closing(ct);
break;
}
}
static const struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
.destroy = nf_ct_destroy,
.get_tuple_skb = nf_conntrack_get_tuple_skb,
.attach = nf_conntrack_attach,
.set_closing = nf_conntrack_set_closing,
};
void nf_conntrack_init_end(void)
{
RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}
/*
* We need to use special "null" values, not used in hash table
*/
#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
int nf_conntrack_init_net(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
int ret = -ENOMEM;
BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
atomic_set(&cnet->count, 0);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat)
return ret;
ret = nf_conntrack_expect_pernet_init(net);
if (ret < 0)
goto err_expect;
nf_conntrack_acct_pernet_init(net);
nf_conntrack_tstamp_pernet_init(net);
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
return 0;
err_expect:
free_percpu(net->ct.stat);
return ret;
}
/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
int __nf_ct_change_timeout(struct nf_conn *ct, u64 timeout)
{
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return -EPERM;
__nf_ct_set_timeout(ct, timeout);
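/* The conntrack may have been killed while we updated the timeout;
* report -ETIME so the caller knows the entry is already dying.
*/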
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
return 0;
}
EXPORT_SYMBOL_GPL(__nf_ct_change_timeout);
void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off)
{
unsigned int bit;
/* Ignore these unchangeable bits */
on &= ~IPS_UNCHANGEABLE_MASK;
off &= ~IPS_UNCHANGEABLE_MASK;
for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
if (on & (1 << bit))
set_bit(bit, &ct->status);
else if (off & (1 << bit))
clear_bit(bit, &ct->status);
}
}
EXPORT_SYMBOL_GPL(__nf_ct_change_status);
int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status)
{
unsigned long d;
d = ct->status ^ status;
if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
/* unchangeable */
return -EBUSY;
if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
/* SEEN_REPLY bit can only be set */
return -EBUSY;
if (d & IPS_ASSURED && !(status & IPS_ASSURED))
/* ASSURED bit can only be set */
return -EBUSY;
__nf_ct_change_status(ct, status, 0);
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_change_status_common);
| linux-master | net/netfilter/nf_conntrack_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/static_key.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nft_meta.h>
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86)
static struct static_key_false nf_tables_skip_direct_calls;
static bool nf_skip_indirect_calls(void)
{
return static_branch_likely(&nf_tables_skip_direct_calls);
}
static void __init nf_skip_indirect_calls_enable(void)
{
if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
static_branch_enable(&nf_tables_skip_direct_calls);
}
#else
static inline bool nf_skip_indirect_calls(void) { return false; }
static inline void nf_skip_indirect_calls_enable(void) { }
#endif
static noinline void __nft_trace_packet(const struct nft_pktinfo *pkt,
const struct nft_verdict *verdict,
const struct nft_rule_dp *rule,
struct nft_traceinfo *info,
enum nft_trace_types type)
{
if (!info->trace || !info->nf_trace)
return;
info->type = type;
nft_trace_notify(pkt, verdict, rule, info);
}
static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
struct nft_verdict *verdict,
struct nft_traceinfo *info,
const struct nft_rule_dp *rule,
enum nft_trace_types type)
{
if (static_branch_unlikely(&nft_trace_enabled)) {
info->nf_trace = pkt->skb->nf_trace;
__nft_trace_packet(pkt, verdict, rule, info, type);
}
}
static inline void nft_trace_copy_nftrace(const struct nft_pktinfo *pkt,
struct nft_traceinfo *info)
{
if (static_branch_unlikely(&nft_trace_enabled))
info->nf_trace = pkt->skb->nf_trace;
}
static void nft_bitwise_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs)
{
const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
u32 *src = &regs->data[priv->sreg];
u32 *dst = &regs->data[priv->dreg];
*dst = (*src & priv->mask) ^ priv->xor;
}
static void nft_cmp_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
if (((regs->data[priv->sreg] & priv->mask) == priv->data) ^ priv->inv)
return;
regs->verdict.code = NFT_BREAK;
}
static void nft_cmp16_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs)
{
const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
const u64 *reg_data = (const u64 *)&regs->data[priv->sreg];
const u64 *mask = (const u64 *)&priv->mask;
const u64 *data = (const u64 *)&priv->data;
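/* 128-bit compare implemented as two masked 64-bit word compares. */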
if (((reg_data[0] & mask[0]) == data[0] &&
((reg_data[1] & mask[1]) == data[1])) ^ priv->inv)
return;
regs->verdict.code = NFT_BREAK;
}
static noinline void __nft_trace_verdict(const struct nft_pktinfo *pkt,
struct nft_traceinfo *info,
const struct nft_rule_dp *rule,
const struct nft_regs *regs)
{
enum nft_trace_types type;
switch (regs->verdict.code) {
case NFT_CONTINUE:
case NFT_RETURN:
type = NFT_TRACETYPE_RETURN;
break;
case NF_STOLEN:
type = NFT_TRACETYPE_RULE;
/* can't access skb->nf_trace; use copy */
break;
default:
type = NFT_TRACETYPE_RULE;
if (info->trace)
info->nf_trace = pkt->skb->nf_trace;
break;
}
__nft_trace_packet(pkt, &regs->verdict, rule, info, type);
}
static inline void nft_trace_verdict(const struct nft_pktinfo *pkt,
struct nft_traceinfo *info,
const struct nft_rule_dp *rule,
const struct nft_regs *regs)
{
if (static_branch_unlikely(&nft_trace_enabled))
__nft_trace_verdict(pkt, info, rule, regs);
}
static bool nft_payload_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
u32 *dest = &regs->data[priv->dreg];
unsigned char *ptr;
if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
ptr = skb_network_header(skb);
else {
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
return false;
ptr = skb_network_header(skb) + nft_thoff(pkt);
}
ptr += priv->offset;
if (unlikely(ptr + priv->len > skb_tail_pointer(skb)))
return false;
*dest = 0;
if (priv->len == 2)
*(u16 *)dest = *(u16 *)ptr;
else if (priv->len == 4)
*(u32 *)dest = *(u32 *)ptr;
else
*(u8 *)dest = *(u8 *)ptr;
return true;
}
DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
static noinline void nft_update_chain_stats(const struct nft_chain *chain,
const struct nft_pktinfo *pkt)
{
struct nft_base_chain *base_chain;
struct nft_stats __percpu *pstats;
struct nft_stats *stats;
base_chain = nft_base_chain(chain);
pstats = READ_ONCE(base_chain->stats);
if (pstats) {
local_bh_disable();
stats = this_cpu_ptr(pstats);
u64_stats_update_begin(&stats->syncp);
stats->pkts++;
stats->bytes += pkt->skb->len;
u64_stats_update_end(&stats->syncp);
local_bh_enable();
}
}
struct nft_jumpstack {
const struct nft_rule_dp *rule;
};
static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_regs *regs,
struct nft_pktinfo *pkt)
{
#ifdef CONFIG_RETPOLINE
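/* With retpolines, indirect calls are costly: match the eval pointer
* against the common built-in expressions and call them directly.
*/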
unsigned long e;
if (nf_skip_indirect_calls())
goto indirect_call;
e = (unsigned long)expr->ops->eval;
#define X(e, fun) \
do { if ((e) == (unsigned long)(fun)) \
return fun(expr, regs, pkt); } while (0)
X(e, nft_payload_eval);
X(e, nft_cmp_eval);
X(e, nft_counter_eval);
X(e, nft_meta_get_eval);
X(e, nft_lookup_eval);
#if IS_ENABLED(CONFIG_NFT_CT)
X(e, nft_ct_get_fast_eval);
#endif
X(e, nft_range_eval);
X(e, nft_immediate_eval);
X(e, nft_byteorder_eval);
X(e, nft_dynset_eval);
X(e, nft_rt_get_eval);
X(e, nft_bitwise_eval);
X(e, nft_objref_eval);
X(e, nft_objref_map_eval);
#undef X
indirect_call:
#endif /* CONFIG_RETPOLINE */
expr->ops->eval(expr, regs, pkt);
}
#define nft_rule_expr_first(rule) (struct nft_expr *)&rule->data[0]
#define nft_rule_expr_next(expr) ((void *)expr) + expr->ops->size
#define nft_rule_expr_last(rule) (struct nft_expr *)&rule->data[rule->dlen]
#define nft_rule_dp_for_each_expr(expr, last, rule) \
for ((expr) = nft_rule_expr_first(rule), (last) = nft_rule_expr_last(rule); \
(expr) != (last); \
(expr) = nft_rule_expr_next(expr))
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, void *priv)
{
const struct nft_chain *chain = priv, *basechain = chain;
const struct net *net = nft_net(pkt);
const struct nft_expr *expr, *last;
const struct nft_rule_dp *rule;
struct nft_regs regs = {};
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
bool genbit = READ_ONCE(net->nft.gencursor);
struct nft_rule_blob *blob;
struct nft_traceinfo info;
info.trace = false;
if (static_branch_unlikely(&nft_trace_enabled))
nft_trace_init(&info, pkt, basechain);
do_chain:
if (genbit)
blob = rcu_dereference(chain->blob_gen_1);
else
blob = rcu_dereference(chain->blob_gen_0);
rule = (struct nft_rule_dp *)blob->data;
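/* Rules are laid out contiguously inside the blob; walk them linearly
* until the trailing is_last marker.
*/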
next_rule:
regs.verdict.code = NFT_CONTINUE;
for (; !rule->is_last ; rule = nft_rule_next(rule)) {
nft_rule_dp_for_each_expr(expr, last, rule) {
if (expr->ops == &nft_cmp_fast_ops)
nft_cmp_fast_eval(expr, &regs);
else if (expr->ops == &nft_cmp16_fast_ops)
nft_cmp16_fast_eval(expr, &regs);
else if (expr->ops == &nft_bitwise_fast_ops)
nft_bitwise_fast_eval(expr, &regs);
else if (expr->ops != &nft_payload_fast_ops ||
!nft_payload_fast_eval(expr, &regs, pkt))
expr_call_ops_eval(expr, &regs, pkt);
if (regs.verdict.code != NFT_CONTINUE)
break;
}
switch (regs.verdict.code) {
case NFT_BREAK:
regs.verdict.code = NFT_CONTINUE;
nft_trace_copy_nftrace(pkt, &info);
continue;
case NFT_CONTINUE:
nft_trace_packet(pkt, &regs.verdict, &info, rule,
NFT_TRACETYPE_RULE);
continue;
}
break;
}
nft_trace_verdict(pkt, &info, rule, &regs);
switch (regs.verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
case NF_STOLEN:
return regs.verdict.code;
}
switch (regs.verdict.code) {
case NFT_JUMP:
if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
return NF_DROP;
jumpstack[stackptr].rule = nft_rule_next(rule);
stackptr++;
fallthrough;
case NFT_GOTO:
chain = regs.verdict.chain;
goto do_chain;
case NFT_CONTINUE:
case NFT_RETURN:
break;
default:
WARN_ON_ONCE(1);
}
if (stackptr > 0) {
stackptr--;
rule = jumpstack[stackptr].rule;
goto next_rule;
}
nft_trace_packet(pkt, &regs.verdict, &info, NULL, NFT_TRACETYPE_POLICY);
if (static_branch_unlikely(&nft_counters_enabled))
nft_update_chain_stats(basechain, pkt);
return nft_base_chain(basechain)->policy;
}
EXPORT_SYMBOL_GPL(nft_do_chain);
static struct nft_expr_type *nft_basic_types[] = {
&nft_imm_type,
&nft_cmp_type,
&nft_lookup_type,
&nft_bitwise_type,
&nft_byteorder_type,
&nft_payload_type,
&nft_dynset_type,
&nft_range_type,
&nft_meta_type,
&nft_rt_type,
&nft_exthdr_type,
&nft_last_type,
&nft_counter_type,
&nft_objref_type,
&nft_inner_type,
};
static struct nft_object_type *nft_basic_objects[] = {
#ifdef CONFIG_NETWORK_SECMARK
&nft_secmark_obj_type,
#endif
&nft_counter_obj_type,
};
int __init nf_tables_core_module_init(void)
{
int err, i, j = 0;
nft_counter_init_seqcount();
for (i = 0; i < ARRAY_SIZE(nft_basic_objects); i++) {
err = nft_register_obj(nft_basic_objects[i]);
if (err)
goto err;
}
for (j = 0; j < ARRAY_SIZE(nft_basic_types); j++) {
err = nft_register_expr(nft_basic_types[j]);
if (err)
goto err;
}
nf_skip_indirect_calls_enable();
return 0;
err:
while (j-- > 0)
nft_unregister_expr(nft_basic_types[j]);
while (i-- > 0)
nft_unregister_obj(nft_basic_objects[i]);
return err;
}
void nf_tables_core_module_exit(void)
{
int i;
i = ARRAY_SIZE(nft_basic_types);
while (i-- > 0)
nft_unregister_expr(nft_basic_types[i]);
i = ARRAY_SIZE(nft_basic_objects);
while (i-- > 0)
nft_unregister_obj(nft_basic_objects[i]);
}
| linux-master | net/netfilter/nf_tables_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/* FTP extension for connection tracking. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <net/checksum.h>
#include <net/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_ftp.h>
#define HELPER_NAME "ftp"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <[email protected]>");
MODULE_DESCRIPTION("ftp connection tracking helper");
MODULE_ALIAS("ip_conntrack_ftp");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
static DEFINE_SPINLOCK(nf_ftp_lock);
#define MAX_PORTS 8
static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
static bool loose;
module_param(loose, bool, 0600);
unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
enum nf_ct_ftp_type type,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp);
EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
static int try_rfc959(const char *, size_t, struct nf_conntrack_man *,
char, unsigned int *);
static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *,
char, unsigned int *);
static int try_eprt(const char *, size_t, struct nf_conntrack_man *,
char, unsigned int *);
static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
char, unsigned int *);
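/* Command/response patterns searched per direction: PORT/EPRT commands
* from the client (original direction), 227 (PASV) and 229 (EPSV)
* replies from the server.
*/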
static struct ftp_search {
const char *pattern;
size_t plen;
char skip;
char term;
enum nf_ct_ftp_type ftptype;
int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *);
} search[IP_CT_DIR_MAX][2] = {
[IP_CT_DIR_ORIGINAL] = {
{
.pattern = "PORT",
.plen = sizeof("PORT") - 1,
.skip = ' ',
.term = '\r',
.ftptype = NF_CT_FTP_PORT,
.getnum = try_rfc959,
},
{
.pattern = "EPRT",
.plen = sizeof("EPRT") - 1,
.skip = ' ',
.term = '\r',
.ftptype = NF_CT_FTP_EPRT,
.getnum = try_eprt,
},
},
[IP_CT_DIR_REPLY] = {
{
.pattern = "227 ",
.plen = sizeof("227 ") - 1,
.ftptype = NF_CT_FTP_PASV,
.getnum = try_rfc1123,
},
{
.pattern = "229 ",
.plen = sizeof("229 ") - 1,
.skip = '(',
.term = ')',
.ftptype = NF_CT_FTP_EPSV,
.getnum = try_epsv_response,
},
},
};
static int
get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term)
{
const char *end;
int ret = in6_pton(src, min_t(size_t, dlen, 0xffff), (u8 *)dst, term, &end);
if (ret > 0)
return (int)(end - src);
return 0;
}
static int try_number(const char *data, size_t dlen, u_int32_t array[],
int array_size, char sep, char term)
{
u_int32_t i, len;
memset(array, 0, sizeof(array[0])*array_size);
/* Keep data pointing at next char. */
for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
if (*data >= '0' && *data <= '9') {
array[i] = array[i]*10 + *data - '0';
}
else if (*data == sep)
i++;
else {
/* Unexpected character; true if it's the
terminator (or we don't care about one)
and we're finished. */
if ((*data == term || !term) && i == array_size - 1)
return len;
pr_debug("Char %u (got %u nums) `%u' unexpected\n",
len, i, *data);
return 0;
}
}
pr_debug("Failed to fill %u numbers separated by %c\n",
array_size, sep);
return 0;
}
/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
static int try_rfc959(const char *data, size_t dlen,
struct nf_conntrack_man *cmd, char term,
unsigned int *offset)
{
int length;
u_int32_t array[6];
length = try_number(data, dlen, array, 6, ',', term);
if (length == 0)
return 0;
cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) |
(array[2] << 8) | array[3]);
cmd->u.tcp.port = htons((array[4] << 8) | array[5]);
return length;
}
/*
* From RFC 1123:
* The format of the 227 reply to a PASV command is not
* well standardized. In particular, an FTP client cannot
* assume that the parentheses shown on page 40 of RFC-959
* will be present (and in fact, Figure 3 on page 43 omits
* them). Therefore, a User-FTP program that interprets
* the PASV reply must scan the reply for the first digit
* of the host and port numbers.
*/
static int try_rfc1123(const char *data, size_t dlen,
struct nf_conntrack_man *cmd, char term,
unsigned int *offset)
{
int i;
for (i = 0; i < dlen; i++)
if (isdigit(data[i]))
break;
if (i == dlen)
return 0;
*offset += i;
return try_rfc959(data + i, dlen - i, cmd, 0, offset);
}
/* Grab port: number up to delimiter */
static int get_port(const char *data, int start, size_t dlen, char delim,
__be16 *port)
{
u_int16_t tmp_port = 0;
int i;
for (i = start; i < dlen; i++) {
/* Finished? */
if (data[i] == delim) {
if (tmp_port == 0)
break;
*port = htons(tmp_port);
pr_debug("get_port: return %d\n", tmp_port);
return i + 1;
}
else if (data[i] >= '0' && data[i] <= '9')
tmp_port = tmp_port*10 + data[i] - '0';
else { /* Some other crap */
pr_debug("get_port: invalid char.\n");
break;
}
}
return 0;
}
/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
char term, unsigned int *offset)
{
char delim;
int length;
/* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
then delimiter again. */
if (dlen <= 3) {
pr_debug("EPRT: too short\n");
return 0;
}
delim = data[0];
if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
pr_debug("try_eprt: invalid delimiter.\n");
return 0;
}
if ((cmd->l3num == PF_INET && data[1] != '1') ||
(cmd->l3num == PF_INET6 && data[1] != '2')) {
pr_debug("EPRT: invalid protocol number.\n");
return 0;
}
pr_debug("EPRT: Got %c%c%c\n", delim, data[1], delim);
if (data[1] == '1') {
u_int32_t array[4];
/* Now we have IP address. */
length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
if (length != 0)
cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16)
| (array[2] << 8) | array[3]);
} else {
/* Now we have IPv6 address. */
length = get_ipv6_addr(data + 3, dlen - 3,
(struct in6_addr *)cmd->u3.ip6, delim);
}
if (length == 0)
return 0;
pr_debug("EPRT: Got IP address!\n");
/* Start offset includes initial "|1|", and trailing delimiter */
return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
}
/* Returns 0, or length of numbers: |||6446| */
static int try_epsv_response(const char *data, size_t dlen,
struct nf_conntrack_man *cmd, char term,
unsigned int *offset)
{
char delim;
/* Three delimiters. */
if (dlen <= 3) return 0;
delim = data[0];
if (isdigit(delim) || delim < 33 || delim > 126 ||
data[1] != delim || data[2] != delim)
return 0;
return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
}
/* Return 1 for match, 0 for no match (accept), -1 for partial match. */
static int find_pattern(const char *data, size_t dlen,
const char *pattern, size_t plen,
char skip, char term,
unsigned int *numoff,
unsigned int *numlen,
struct nf_conntrack_man *cmd,
int (*getnum)(const char *, size_t,
struct nf_conntrack_man *, char,
unsigned int *))
{
size_t i = plen;
pr_debug("find_pattern `%s': dlen = %zu\n", pattern, dlen);
if (dlen <= plen) {
/* Short packet: try for partial? */
if (strncasecmp(data, pattern, dlen) == 0)
return -1;
else return 0;
}
if (strncasecmp(data, pattern, plen) != 0)
return 0;
pr_debug("Pattern matches!\n");
/* Now we've found the constant string, try to skip
to the 'skip' character */
if (skip) {
for (i = plen; data[i] != skip; i++)
if (i == dlen - 1) return -1;
/* Skip over the last character */
i++;
}
pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
*numoff = i;
*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
if (!*numlen)
return -1;
pr_debug("Match succeeded!\n");
return 1;
}
/* Look up to see if we're just after a \n. */
static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir)
{
unsigned int i;
for (i = 0; i < info->seq_aft_nl_num[dir]; i++)
if (info->seq_aft_nl[dir][i] == seq)
return 1;
return 0;
}
/* We don't update if it's older than what we have. */
static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
struct nf_ct_ftp_master *info, int dir,
struct sk_buff *skb)
{
unsigned int i, oldest;
/* Look for oldest: if we find exact match, we're done. */
for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
if (info->seq_aft_nl[dir][i] == nl_seq)
return;
}
if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
} else {
if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
oldest = 0;
else
oldest = 1;
if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
info->seq_aft_nl[dir][oldest] = nl_seq;
}
}
static int help(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
unsigned int dataoff, datalen;
const struct tcphdr *th;
struct tcphdr _tcph;
const char *fb_ptr;
int ret;
u32 seq;
int dir = CTINFO2DIR(ctinfo);
unsigned int matchlen, matchoff;
struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
union nf_inet_addr *daddr;
struct nf_conntrack_man cmd = {};
unsigned int i;
int found = 0, ends_in_nl;
typeof(nf_nat_ftp_hook) nf_nat_ftp;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY) {
pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
return NF_ACCEPT;
}
if (unlikely(skb_linearize(skb)))
return NF_DROP;
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
dataoff = protoff + th->doff * 4;
/* No data? */
if (dataoff >= skb->len) {
pr_debug("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
skb->len);
return NF_ACCEPT;
}
datalen = skb->len - dataoff;
/* seqadj (nat) uses ct->lock internally, so holding it across nf_nat_ftp would deadlock; use the dedicated nf_ftp_lock instead */
spin_lock_bh(&nf_ftp_lock);
fb_ptr = skb->data + dataoff;
ends_in_nl = (fb_ptr[datalen - 1] == '\n');
seq = ntohl(th->seq) + datalen;
/* Look up to see if we're just after a \n. */
if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
/* We're picking this up; clear the flag and let it continue */
if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) {
ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP;
goto skip_nl_seq;
}
/* Wrong sequence position: skip parsing, but still record a trailing \n via out_update_nl below. */
pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
ct_ftp_info->seq_aft_nl[dir][0],
ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
ct_ftp_info->seq_aft_nl[dir][1]);
ret = NF_ACCEPT;
goto out_update_nl;
}
skip_nl_seq:
/* Initialize IP/IPv6 addr to expected address (it's not mentioned
in EPSV responses) */
cmd.l3num = nf_ct_l3num(ct);
memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all));
for (i = 0; i < ARRAY_SIZE(search[dir]); i++) {
found = find_pattern(fb_ptr, datalen,
search[dir][i].pattern,
search[dir][i].plen,
search[dir][i].skip,
search[dir][i].term,
&matchoff, &matchlen,
&cmd,
search[dir][i].getnum);
if (found) break;
}
if (found == -1) {
/* We don't usually drop packets. After all, this is
connection tracking, not packet filtering.
However, it is necessary for accurate tracking in
this case. */
nf_ct_helper_log(skb, ct, "partial matching of `%s'",
search[dir][i].pattern);
ret = NF_DROP;
goto out;
} else if (found == 0) { /* No match */
ret = NF_ACCEPT;
goto out_update_nl;
}
pr_debug("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
matchlen, fb_ptr + matchoff,
matchlen, ntohl(th->seq) + matchoff);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
ret = NF_DROP;
goto out;
}
/* We refer to the reverse direction ("!dir") tuples here,
* because we're expecting something in the other direction.
* Doesn't matter unless NAT is happening. */
daddr = &ct->tuplehash[!dir].tuple.dst.u3;
/* Update the ftp info */
if ((cmd.l3num == nf_ct_l3num(ct)) &&
memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all))) {
/* Enrico Scholz's passive FTP to partially RNAT'd ftp
server: it really wants us to connect to a
different IP address. Simply don't record it for
NAT. */
if (cmd.l3num == PF_INET) {
pr_debug("NOT RECORDING: %pI4 != %pI4\n",
&cmd.u3.ip,
&ct->tuplehash[dir].tuple.src.u3.ip);
} else {
pr_debug("NOT RECORDING: %pI6 != %pI6\n",
cmd.u3.ip6,
ct->tuplehash[dir].tuple.src.u3.ip6);
}
/* Thanks to Cristiano Lincoln Mattos
<[email protected]> for reporting this potential
problem (DMZ machines opening holes to internal
networks, or the packet filter itself). */
if (!loose) {
ret = NF_ACCEPT;
goto out_put_expect;
}
daddr = &cmd.u3;
}
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, cmd.l3num,
&ct->tuplehash[!dir].tuple.src.u3, daddr,
IPPROTO_TCP, NULL, &cmd.u.tcp.port);
/* Now, NAT might want to mangle the packet, and register the
* (possibly changed) expectation itself. */
nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
protoff, matchoff, matchlen, exp);
else {
/* Can't expect this? Best to drop packet now. */
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
} else
ret = NF_ACCEPT;
}
out_put_expect:
nf_ct_expect_put(exp);
out_update_nl:
/* Now if this ends in \n, update ftp info. Seq may have been
* adjusted by NAT code. */
if (ends_in_nl)
update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
out:
spin_unlock_bh(&nf_ftp_lock);
return ret;
}
static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
struct nf_ct_ftp_master *ftp = nfct_help_data(ct);
/* This conntrack has been injected from user-space, always pick up
* sequence tracking. Otherwise, the first FTP command after the
* failover breaks.
*/
ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP;
ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP;
return 0;
}
static struct nf_conntrack_helper ftp[MAX_PORTS * 2] __read_mostly;
static const struct nf_conntrack_expect_policy ftp_exp_policy = {
.max_expected = 1,
.timeout = 5 * 60,
};
static void __exit nf_conntrack_ftp_fini(void)
{
nf_conntrack_helpers_unregister(ftp, ports_c * 2);
}
static int __init nf_conntrack_ftp_init(void)
{
int i, ret = 0;
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_ftp_master));
if (ports_c == 0)
ports[ports_c++] = FTP_PORT;
/* FIXME: it should be configurable whether IPv4 and IPv6 FTP
 connections are tracked or not - YK */
for (i = 0; i < ports_c; i++) {
nf_ct_helper_init(&ftp[2 * i], AF_INET, IPPROTO_TCP,
HELPER_NAME, FTP_PORT, ports[i], ports[i],
&ftp_exp_policy, 0, help,
nf_ct_ftp_from_nlattr, THIS_MODULE);
nf_ct_helper_init(&ftp[2 * i + 1], AF_INET6, IPPROTO_TCP,
HELPER_NAME, FTP_PORT, ports[i], ports[i],
&ftp_exp_policy, 0, help,
nf_ct_ftp_from_nlattr, THIS_MODULE);
}
ret = nf_conntrack_helpers_register(ftp, ports_c * 2);
if (ret < 0) {
pr_err("failed to register helpers\n");
return ret;
}
return 0;
}
module_init(nf_conntrack_ftp_init);
module_exit(nf_conntrack_ftp_fini);
| linux-master | net/netfilter/nf_conntrack_ftp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Laura Garcia <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/random.h>
#include <linux/static_key.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_ng_inc {
u8 dreg;
u32 modulus;
atomic_t *counter;
u32 offset;
};
static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
{
u32 nval, oval;
do {
oval = atomic_read(priv->counter);
nval = (oval + 1 < priv->modulus) ? oval + 1 : 0;
} while (atomic_cmpxchg(priv->counter, oval, nval) != oval);
return nval + priv->offset;
}
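/* Lock-free wraparound counter: nft_ng_inc_init() below seeds the counter
 * with modulus - 1, so with modulus = 3 and offset = 100 successive
 * evaluations yield 100, 101, 102, 100, ... The cmpxchg loop simply
 * retries when another CPU raced us, avoiding a spinlock on the packet
 * path.
 */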
static void nft_ng_inc_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_ng_inc *priv = nft_expr_priv(expr);
regs->data[priv->dreg] = nft_ng_inc_gen(priv);
}
static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
[NFTA_NG_DREG] = { .type = NLA_U32 },
[NFTA_NG_MODULUS] = { .type = NLA_U32 },
[NFTA_NG_TYPE] = { .type = NLA_U32 },
[NFTA_NG_OFFSET] = { .type = NLA_U32 },
};
static int nft_ng_inc_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ng_inc *priv = nft_expr_priv(expr);
int err;
if (tb[NFTA_NG_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
if (priv->modulus == 0)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
if (!priv->counter)
return -ENOMEM;
atomic_set(priv->counter, priv->modulus - 1);
err = nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, sizeof(u32));
if (err < 0)
goto err;
return 0;
err:
kfree(priv->counter);
return err;
}
static bool nft_ng_inc_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_ng_inc *priv = nft_expr_priv(expr);
nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
return false;
}
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
u32 modulus, enum nft_ng_types type, u32 offset)
{
if (nft_dump_register(skb, NFTA_NG_DREG, dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_NG_MODULUS, htonl(modulus)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_NG_TYPE, htonl(type)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_NG_OFFSET, htonl(offset)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_ng_inc_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_ng_inc *priv = nft_expr_priv(expr);
return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_INCREMENTAL,
priv->offset);
}
static void nft_ng_inc_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_ng_inc *priv = nft_expr_priv(expr);
kfree(priv->counter);
}
struct nft_ng_random {
u8 dreg;
u32 modulus;
u32 offset;
};
static u32 nft_ng_random_gen(const struct nft_ng_random *priv)
{
return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;
}
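/* reciprocal_scale() maps the full u32 random range into [0, modulus)
 * using a multiply-and-shift, (u32)(((u64)val * modulus) >> 32), instead
 * of a per-packet division or modulo. E.g. modulus = 16 effectively keeps
 * the top four bits of get_random_u32().
 */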
static void nft_ng_random_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_ng_random *priv = nft_expr_priv(expr);
regs->data[priv->dreg] = nft_ng_random_gen(priv);
}
static int nft_ng_random_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ng_random *priv = nft_expr_priv(expr);
if (tb[NFTA_NG_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
if (priv->modulus == 0)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, sizeof(u32));
}
static int nft_ng_random_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_RANDOM,
priv->offset);
}
static bool nft_ng_random_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
return false;
}
static struct nft_expr_type nft_ng_type;
static const struct nft_expr_ops nft_ng_inc_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
.eval = nft_ng_inc_eval,
.init = nft_ng_inc_init,
.destroy = nft_ng_inc_destroy,
.dump = nft_ng_inc_dump,
.reduce = nft_ng_inc_reduce,
};
static const struct nft_expr_ops nft_ng_random_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
.eval = nft_ng_random_eval,
.init = nft_ng_random_init,
.dump = nft_ng_random_dump,
.reduce = nft_ng_random_reduce,
};
static const struct nft_expr_ops *
nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
u32 type;
if (!tb[NFTA_NG_DREG] ||
!tb[NFTA_NG_MODULUS] ||
!tb[NFTA_NG_TYPE])
return ERR_PTR(-EINVAL);
type = ntohl(nla_get_be32(tb[NFTA_NG_TYPE]));
switch (type) {
case NFT_NG_INCREMENTAL:
return &nft_ng_inc_ops;
case NFT_NG_RANDOM:
return &nft_ng_random_ops;
}
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_ng_type __read_mostly = {
.name = "numgen",
.select_ops = nft_ng_select_ops,
.policy = nft_ng_policy,
.maxattr = NFTA_NG_MAX,
.owner = THIS_MODULE,
};
static int __init nft_ng_module_init(void)
{
return nft_register_expr(&nft_ng_type);
}
static void __exit nft_ng_module_exit(void)
{
nft_unregister_expr(&nft_ng_type);
}
module_init(nft_ng_module_init);
module_exit(nft_ng_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <[email protected]>");
MODULE_ALIAS_NFT_EXPR("numgen");
MODULE_DESCRIPTION("nftables number generator module");
| linux-master | net/netfilter/nft_numgen.c |
// SPDX-License-Identifier: GPL-2.0-only
/* FTP extension for TCP NAT alteration. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/inet.h>
#include <linux/tcp.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_ftp.h>
#define NAT_HELPER_NAME "ftp"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <[email protected]>");
MODULE_DESCRIPTION("ftp NAT helper");
MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME);
/* FIXME: Time out? --RR */
static struct nf_conntrack_nat_helper nat_helper_ftp =
NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME);
static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type,
char *buffer, size_t buflen,
union nf_inet_addr *addr, u16 port)
{
switch (type) {
case NF_CT_FTP_PORT:
case NF_CT_FTP_PASV:
return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
((unsigned char *)&addr->ip)[0],
((unsigned char *)&addr->ip)[1],
((unsigned char *)&addr->ip)[2],
((unsigned char *)&addr->ip)[3],
port >> 8,
port & 0xFF);
case NF_CT_FTP_EPRT:
if (nf_ct_l3num(ct) == NFPROTO_IPV4)
return snprintf(buffer, buflen, "|1|%pI4|%u|",
&addr->ip, port);
else
return snprintf(buffer, buflen, "|2|%pI6|%u|",
&addr->ip6, port);
case NF_CT_FTP_EPSV:
return snprintf(buffer, buflen, "|||%u|", port);
}
return 0;
}
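/* Example outputs (addresses illustrative): for 192.168.0.1 port 1234,
 * PORT/PASV yields "192,168,0,1,4,210" (port split as p1 * 256 + p2,
 * i.e. 4 * 256 + 210 = 1234); EPRT yields "|1|192.168.0.1|1234|" for IPv4
 * or "|2|<ipv6>|1234|" for IPv6; EPSV yields "|||1234|".
 */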
/* So, this packet has hit the connection tracking matching code.
Mangle it, and change the expectation to match the new version. */
static unsigned int nf_nat_ftp(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
enum nf_ct_ftp_type type,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
{
union nf_inet_addr newaddr;
u_int16_t port;
int dir = CTINFO2DIR(ctinfo);
struct nf_conn *ct = exp->master;
char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
unsigned int buflen;
pr_debug("type %i, off %u len %u\n", type, matchoff, matchlen);
/* Connection will come from wherever this packet goes, hence !dir */
newaddr = ct->tuplehash[!dir].tuple.dst.u3;
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = !dir;
/* When we see the expected packet, we need to NAT it the same
 * as this one. */
exp->expectfn = nf_nat_follow_master;
port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
}
buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer),
&newaddr, port);
if (!buflen)
goto out;
pr_debug("calling nf_nat_mangle_tcp_packet\n");
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
matchlen, buffer, buflen))
goto out;
return NF_ACCEPT;
out:
nf_ct_helper_log(skb, ct, "cannot mangle packet");
nf_ct_unexpect_related(exp);
return NF_DROP;
}
static void __exit nf_nat_ftp_fini(void)
{
nf_nat_helper_unregister(&nat_helper_ftp);
RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_ftp_init(void)
{
BUG_ON(nf_nat_ftp_hook != NULL);
nf_nat_helper_register(&nat_helper_ftp);
RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
return 0;
}
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
static int warn_set(const char *val, const struct kernel_param *kp)
{
pr_info("kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
return 0;
}
module_param_call(ports, warn_set, NULL, NULL, 0);
module_init(nf_nat_ftp_init);
module_exit(nf_nat_ftp_fini);
| linux-master | net/netfilter/nf_nat_ftp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_lookup {
struct nft_set *set;
u8 sreg;
u8 dreg;
bool dreg_set;
bool invert;
struct nft_set_binding binding;
};
#ifdef CONFIG_RETPOLINE
bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
if (set->ops == &nft_set_hash_fast_type.ops)
return nft_hash_lookup_fast(net, set, key, ext);
if (set->ops == &nft_set_hash_type.ops)
return nft_hash_lookup(net, set, key, ext);
if (set->ops == &nft_set_rhash_type.ops)
return nft_rhash_lookup(net, set, key, ext);
if (set->ops == &nft_set_bitmap_type.ops)
return nft_bitmap_lookup(net, set, key, ext);
if (set->ops == &nft_set_pipapo_type.ops)
return nft_pipapo_lookup(net, set, key, ext);
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (set->ops == &nft_set_pipapo_avx2_type.ops)
return nft_pipapo_avx2_lookup(net, set, key, ext);
#endif
if (set->ops == &nft_set_rbtree_type.ops)
return nft_rbtree_lookup(net, set, key, ext);
WARN_ON_ONCE(1);
return set->ops->lookup(net, set, key, ext);
}
EXPORT_SYMBOL_GPL(nft_set_do_lookup);
#endif
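/* With CONFIG_RETPOLINE, indirect calls go through retpoline thunks and
 * become costly on the packet path. The direct pointer comparisons above
 * devirtualize ->lookup() for every in-tree set backend; the trailing
 * indirect call is only a WARN-once safety net.
 */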
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
const struct nft_set_ext *ext = NULL;
const struct net *net = nft_net(pkt);
bool found;
found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext) ^
priv->invert;
if (!found) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
regs->verdict.code = NFT_BREAK;
return;
}
}
if (ext) {
if (priv->dreg_set)
nft_data_copy(®s->data[priv->dreg],
nft_set_ext_data(ext), set->dlen);
nft_set_elem_update_expr(ext, regs, pkt);
}
}
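/* A miss (or an inverted hit) falls back to the set's catch-all element,
 * if one exists; only when that also fails does the rule break. For map
 * lookups, the matching element's data is copied into the destination
 * register before any element expressions run.
 */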
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
[NFTA_LOOKUP_SET] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
[NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
[NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
[NFTA_LOOKUP_FLAGS] =
NLA_POLICY_MASK(NLA_BE32, NFT_LOOKUP_F_INV),
};
static int nft_lookup_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_lookup *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set *set;
u32 flags;
int err;
if (tb[NFTA_LOOKUP_SET] == NULL ||
tb[NFTA_LOOKUP_SREG] == NULL)
return -EINVAL;
set = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
tb[NFTA_LOOKUP_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg,
set->klen);
if (err < 0)
return err;
if (tb[NFTA_LOOKUP_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFTA_LOOKUP_FLAGS]));
if (flags & NFT_LOOKUP_F_INV)
priv->invert = true;
}
if (tb[NFTA_LOOKUP_DREG] != NULL) {
if (priv->invert)
return -EINVAL;
if (!(set->flags & NFT_SET_MAP))
return -EINVAL;
err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
&priv->dreg, NULL, set->dtype,
set->dlen);
if (err < 0)
return err;
priv->dreg_set = true;
} else if (set->flags & NFT_SET_MAP) {
/* Map given, but user asks for lookup only (i.e. to
 * ignore value associated with key).
*
* This makes no sense for anonymous maps since they are
* scoped to the rule, but for named sets this can be useful.
*/
if (set->flags & NFT_SET_ANONYMOUS)
return -EINVAL;
}
priv->binding.flags = set->flags & NFT_SET_MAP;
err = nf_tables_bind_set(ctx, set, &priv->binding);
if (err < 0)
return err;
priv->set = set;
return 0;
}
static void nft_lookup_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_lookup *priv = nft_expr_priv(expr);
nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
}
static void nft_lookup_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_lookup *priv = nft_expr_priv(expr);
nf_tables_activate_set(ctx, priv->set);
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_lookup *priv = nft_expr_priv(expr);
nf_tables_destroy_set(ctx, priv->set);
}
static int nft_lookup_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
u32 flags = priv->invert ? NFT_LOOKUP_F_INV : 0;
if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_LOOKUP_SREG, priv->sreg))
goto nla_put_failure;
if (priv->dreg_set)
if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_LOOKUP_FLAGS, htonl(flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_lookup_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **d)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
struct nft_set_iter iter;
if (!(priv->set->flags & NFT_SET_MAP) ||
priv->set->dtype != NFT_DATA_VERDICT)
return 0;
iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nft_setelem_validate;
priv->set->ops->walk(ctx, priv->set, &iter);
if (!iter.err)
iter.err = nft_set_catchall_validate(ctx, priv->set);
if (iter.err < 0)
return iter.err;
return 0;
}
static bool nft_lookup_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
if (priv->set->flags & NFT_SET_MAP)
nft_reg_track_cancel(track, priv->dreg, priv->set->dlen);
return false;
}
static const struct nft_expr_ops nft_lookup_ops = {
.type = &nft_lookup_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
.activate = nft_lookup_activate,
.deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,
.validate = nft_lookup_validate,
.reduce = nft_lookup_reduce,
};
struct nft_expr_type nft_lookup_type __read_mostly = {
.name = "lookup",
.ops = &nft_lookup_ops,
.policy = nft_lookup_policy,
.maxattr = NFTA_LOOKUP_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_lookup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* Copyright (c) 2011 Patrick McHardy <[email protected]>
*
* Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
* NAT funded by Astaro.
*/
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>
static unsigned int
nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range,
const union nf_inet_addr *newdst)
{
struct nf_nat_range2 newrange;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
memset(&newrange, 0, sizeof(newrange));
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr = *newdst;
newrange.max_addr = *newdst;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
}
unsigned int
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum)
{
union nf_inet_addr newdst = {};
WARN_ON(hooknum != NF_INET_PRE_ROUTING &&
hooknum != NF_INET_LOCAL_OUT);
/* Local packets: make them go to loopback */
if (hooknum == NF_INET_LOCAL_OUT) {
newdst.ip = htonl(INADDR_LOOPBACK);
} else {
const struct in_device *indev;
indev = __in_dev_get_rcu(skb->dev);
if (indev) {
const struct in_ifaddr *ifa;
ifa = rcu_dereference(indev->ifa_list);
if (ifa)
newdst.ip = ifa->ifa_local;
}
if (!newdst.ip)
return NF_DROP;
}
return nf_nat_redirect(skb, range, &newdst);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
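/* Usage sketch: this backs the redirect targets, e.g.
 *
 *	iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 8080
 *
 * For forwarded packets the new destination is the inbound interface's
 * primary (first) IPv4 address; locally generated packets go to 127.0.0.1.
 */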
static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum)
{
union nf_inet_addr newdst = {};
if (hooknum == NF_INET_LOCAL_OUT) {
newdst.in6 = loopback_addr;
} else {
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
bool addr = false;
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
newdst.in6 = ifa->addr;
addr = true;
break;
}
read_unlock_bh(&idev->lock);
}
if (!addr)
return NF_DROP;
}
return nf_nat_redirect(skb, range, &newdst);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
| linux-master | net/netfilter/nf_nat_redirect.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* netfilter module to enforce network quotas
*
* Sam Johnston <[email protected]>
*/
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_quota.h>
#include <linux/module.h>
struct xt_quota_priv {
spinlock_t lock;
uint64_t quota;
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sam Johnston <[email protected]>");
MODULE_DESCRIPTION("Xtables: countdown quota match");
MODULE_ALIAS("ipt_quota");
MODULE_ALIAS("ip6t_quota");
static bool
quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct xt_quota_info *q = (void *)par->matchinfo;
struct xt_quota_priv *priv = q->master;
bool ret = q->flags & XT_QUOTA_INVERT;
spin_lock_bh(&priv->lock);
if (priv->quota >= skb->len) {
priv->quota -= skb->len;
ret = !ret;
} else {
/* we do not allow even small packets from now on */
priv->quota = 0;
}
spin_unlock_bh(&priv->lock);
return ret;
}
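/* Semantics example: with quota = 1000, a 600-byte packet matches and
 * leaves 400 bytes; the next 600-byte packet does not match and zeroes
 * the remaining quota, so no later packet matches either. XT_QUOTA_INVERT
 * flips the result: packets match only once the quota is exhausted.
 */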
static int quota_mt_check(const struct xt_mtchk_param *par)
{
struct xt_quota_info *q = par->matchinfo;
if (q->flags & ~XT_QUOTA_MASK)
return -EINVAL;
q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
if (q->master == NULL)
return -ENOMEM;
spin_lock_init(&q->master->lock);
q->master->quota = q->quota;
return 0;
}
static void quota_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_quota_info *q = par->matchinfo;
kfree(q->master);
}
static struct xt_match quota_mt_reg __read_mostly = {
.name = "quota",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = quota_mt,
.checkentry = quota_mt_check,
.destroy = quota_mt_destroy,
.matchsize = sizeof(struct xt_quota_info),
.usersize = offsetof(struct xt_quota_info, master),
.me = THIS_MODULE,
};
static int __init quota_mt_init(void)
{
return xt_register_match(&quota_mt_reg);
}
static void __exit quota_mt_exit(void)
{
xt_unregister_match(&quota_mt_reg);
}
module_init(quota_mt_init);
module_exit(quota_mt_exit);
| linux-master | net/netfilter/xt_quota.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Creates audit record for dropped/accepted packets
*
* (C) 2010-2011 Thomas Graf <[email protected]>
* (C) 2010-2011 Red Hat, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/audit.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_AUDIT.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <net/ipv6.h>
#include <net/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Graf <[email protected]>");
MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
MODULE_ALIAS("ipt_AUDIT");
MODULE_ALIAS("ip6t_AUDIT");
MODULE_ALIAS("ebt_AUDIT");
MODULE_ALIAS("arpt_AUDIT");
static bool audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
{
struct iphdr _iph;
const struct iphdr *ih;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_iph), &_iph);
if (!ih)
return false;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu",
&ih->saddr, &ih->daddr, ih->protocol);
return true;
}
static bool audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
{
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
u8 nexthdr;
__be16 frag_off;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
if (!ih)
return false;
nexthdr = ih->nexthdr;
ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr, &frag_off);
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
&ih->saddr, &ih->daddr, nexthdr);
return true;
}
static unsigned int
audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct audit_buffer *ab;
int fam = -1;
if (audit_enabled == AUDIT_OFF)
goto errout;
ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
if (ab == NULL)
goto errout;
audit_log_format(ab, "mark=%#x", skb->mark);
switch (xt_family(par)) {
case NFPROTO_BRIDGE:
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case htons(ETH_P_IPV6):
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
break;
case NFPROTO_IPV4:
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case NFPROTO_IPV6:
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
if (fam == -1)
audit_log_format(ab, " saddr=? daddr=? proto=-1");
audit_log_end(ab);
errout:
return XT_CONTINUE;
}
static unsigned int
audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par)
{
audit_tg(skb, par);
return EBT_CONTINUE;
}
static int audit_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_audit_info *info = par->targinfo;
if (info->type > XT_AUDIT_TYPE_MAX) {
pr_info_ratelimited("Audit type out of range (valid range: 0..%u)\n",
XT_AUDIT_TYPE_MAX);
return -ERANGE;
}
return 0;
}
static struct xt_target audit_tg_reg[] __read_mostly = {
{
.name = "AUDIT",
.family = NFPROTO_UNSPEC,
.target = audit_tg,
.targetsize = sizeof(struct xt_audit_info),
.checkentry = audit_tg_check,
.me = THIS_MODULE,
},
{
.name = "AUDIT",
.family = NFPROTO_BRIDGE,
.target = audit_tg_ebt,
.targetsize = sizeof(struct xt_audit_info),
.checkentry = audit_tg_check,
.me = THIS_MODULE,
},
};
static int __init audit_tg_init(void)
{
return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}
static void __exit audit_tg_exit(void)
{
xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}
module_init(audit_tg_init);
module_exit(audit_tg_exit);
| linux-master | net/netfilter/xt_AUDIT.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2012-2013 by Pablo Neira Ayuso <[email protected]>
*
* This software has been sponsored by Sophos Astaro <http://www.sophos.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/netfilter/nf_tables_compat.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_log.h>
/* Matches whose *info exceeds this many bytes get their info allocated out of line */
#define NFT_MATCH_LARGE_THRESH 192
struct nft_xt_match_priv {
void *info;
};
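/* Matchinfo blobs above NFT_MATCH_LARGE_THRESH are not embedded in the
 * expression itself; the expression stores only this pointer and the blob
 * is kmalloc'ed separately in nft_match_large_init() below, keeping rule
 * blobs compact.
 */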
static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
const char *tablename)
{
enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
const struct nft_chain *chain = ctx->chain;
const struct nft_base_chain *basechain;
if (!tablename ||
!nft_is_base_chain(chain))
return 0;
basechain = nft_base_chain(chain);
if (strcmp(tablename, "nat") == 0) {
if (ctx->family != NFPROTO_BRIDGE)
type = NFT_CHAIN_T_NAT;
if (basechain->type->type != type)
return -EINVAL;
}
return 0;
}
union nft_entry {
struct ipt_entry e4;
struct ip6t_entry e6;
struct ebt_entry ebt;
struct arpt_entry arp;
};
static inline void
nft_compat_set_par(struct xt_action_param *par,
const struct nft_pktinfo *pkt,
const void *xt, const void *xt_info)
{
par->state = pkt->state;
par->thoff = nft_thoff(pkt);
par->fragoff = pkt->fragoff;
par->target = xt;
par->targinfo = xt_info;
par->hotdrop = false;
}
static void nft_target_eval_xt(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
struct sk_buff *skb = pkt->skb;
struct xt_action_param xt;
int ret;
nft_compat_set_par(&xt, pkt, target, info);
ret = target->target(skb, &xt);
if (xt.hotdrop)
ret = NF_DROP;
switch (ret) {
case XT_CONTINUE:
regs->verdict.code = NFT_CONTINUE;
break;
default:
regs->verdict.code = ret;
break;
}
}
static void nft_target_eval_bridge(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
struct sk_buff *skb = pkt->skb;
struct xt_action_param xt;
int ret;
nft_compat_set_par(&xt, pkt, target, info);
ret = target->target(skb, &xt);
if (xt.hotdrop)
ret = NF_DROP;
switch (ret) {
case EBT_ACCEPT:
regs->verdict.code = NF_ACCEPT;
break;
case EBT_DROP:
regs->verdict.code = NF_DROP;
break;
case EBT_CONTINUE:
regs->verdict.code = NFT_CONTINUE;
break;
case EBT_RETURN:
regs->verdict.code = NFT_RETURN;
break;
default:
regs->verdict.code = ret;
break;
}
}
static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
[NFTA_TARGET_NAME] = { .type = NLA_NUL_STRING },
[NFTA_TARGET_REV] = { .type = NLA_U32 },
[NFTA_TARGET_INFO] = { .type = NLA_BINARY },
};
static void
nft_target_set_tgchk_param(struct xt_tgchk_param *par,
const struct nft_ctx *ctx,
struct xt_target *target, void *info,
union nft_entry *entry, u16 proto, bool inv)
{
par->net = ctx->net;
par->table = ctx->table->name;
switch (ctx->family) {
case AF_INET:
entry->e4.ip.proto = proto;
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
break;
case AF_INET6:
if (proto)
entry->e6.ipv6.flags |= IP6T_F_PROTO;
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
case NFPROTO_BRIDGE:
entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
case NFPROTO_ARP:
break;
}
par->entryinfo = entry;
par->target = target;
par->targinfo = info;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
par->hook_mask = 1 << ops->hooknum;
} else {
par->hook_mask = 0;
}
par->family = ctx->family;
par->nft_compat = true;
}
static void target_compat_from_user(struct xt_target *t, void *in, void *out)
{
int pad;
memcpy(out, in, t->targetsize);
pad = XT_ALIGN(t->targetsize) - t->targetsize;
if (pad > 0)
memset(out + t->targetsize, 0, pad);
}
static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
[NFTA_RULE_COMPAT_PROTO] = { .type = NLA_U32 },
[NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
};
static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
{
struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
u32 flags;
int err;
err = nla_parse_nested_deprecated(tb, NFTA_RULE_COMPAT_MAX, attr,
nft_rule_compat_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
return -EINVAL;
flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
if (flags & ~NFT_RULE_COMPAT_F_MASK)
return -EINVAL;
if (flags & NFT_RULE_COMPAT_F_INV)
*inv = true;
*proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
return 0;
}
static void nft_compat_wait_for_destructors(void)
{
/* xtables matches or targets can have side effects, e.g.
* creation/destruction of /proc files.
* The xt ->destroy functions are run asynchronously from
* a work queue. If we have pending invocations we thus
* need to wait for those to finish.
*/
nf_tables_trans_destroy_flush_work();
}
static int
nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
struct xt_tgchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
u16 proto = 0;
bool inv = false;
union nft_entry e = {};
int ret;
target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
if (ctx->nla[NFTA_RULE_COMPAT]) {
ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
if (ret < 0)
return ret;
}
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
nft_compat_wait_for_destructors();
ret = xt_check_target(&par, size, proto, inv);
if (ret < 0) {
if (ret == -ENOENT) {
const char *modname = NULL;
if (strcmp(target->name, "LOG") == 0)
modname = "nf_log_syslog";
else if (strcmp(target->name, "NFLOG") == 0)
modname = "nfnetlink_log";
if (modname &&
nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
return -EAGAIN;
}
return ret;
}
/* The standard target cannot be used */
if (!target->target)
return -EINVAL;
return 0;
}
static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
{
module_put(me);
kfree(expr->ops);
}
static void
nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
struct module *me = target->me;
struct xt_tgdtor_param par;
par.net = ctx->net;
par.target = target;
par.targinfo = info;
par.family = ctx->family;
if (par.target->destroy != NULL)
par.target->destroy(&par);
__nft_mt_tg_destroy(me, expr);
}
static int nft_extension_dump_info(struct sk_buff *skb, int attr,
const void *info,
unsigned int size, unsigned int user_size)
{
unsigned int info_size, aligned_size = XT_ALIGN(size);
struct nlattr *nla;
nla = nla_reserve(skb, attr, aligned_size);
if (!nla)
return -1;
info_size = user_size ? : size;
memcpy(nla_data(nla), info, info_size);
memset(nla_data(nla) + info_size, 0, aligned_size - info_size);
return 0;
}
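/* ->usersize limits how much of the kernel's info blob is copied back to
 * userspace: xt_quota, for instance, sets .usersize to exclude its
 * 'master' kernel pointer (see xt_quota.c earlier in this dump). The rest
 * of the XT_ALIGN'ed attribute is explicitly zeroed so no kernel memory
 * leaks into the dump.
 */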
static int nft_target_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
target->targetsize, target->usersize))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_target_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct xt_target *target = expr->ops->data;
unsigned int hook_mask = 0;
int ret;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
hook_mask = 1 << ops->hooknum;
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
}
return 0;
}
static void __nft_match_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
void *info)
{
struct xt_match *match = expr->ops->data;
struct sk_buff *skb = pkt->skb;
struct xt_action_param xt;
bool ret;
nft_compat_set_par(&xt, pkt, match, info);
ret = match->match(skb, &xt);
if (xt.hotdrop) {
regs->verdict.code = NF_DROP;
return;
}
switch (ret ? 1 : 0) {
case 1:
regs->verdict.code = NFT_CONTINUE;
break;
case 0:
regs->verdict.code = NFT_BREAK;
break;
}
}
static void nft_match_large_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
__nft_match_eval(expr, regs, pkt, priv->info);
}
static void nft_match_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
__nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
}
static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
[NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
[NFTA_MATCH_REV] = { .type = NLA_U32 },
[NFTA_MATCH_INFO] = { .type = NLA_BINARY },
};
/* struct xt_mtchk_param and xt_tgchk_param look very similar */
static void
nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
struct xt_match *match, void *info,
union nft_entry *entry, u16 proto, bool inv)
{
par->net = ctx->net;
par->table = ctx->table->name;
switch (ctx->family) {
case AF_INET:
entry->e4.ip.proto = proto;
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
break;
case AF_INET6:
if (proto)
entry->e6.ipv6.flags |= IP6T_F_PROTO;
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
case NFPROTO_BRIDGE:
entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
case NFPROTO_ARP:
break;
}
par->entryinfo = entry;
par->match = match;
par->matchinfo = info;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
par->hook_mask = 1 << ops->hooknum;
} else {
par->hook_mask = 0;
}
par->family = ctx->family;
par->nft_compat = true;
}
static void match_compat_from_user(struct xt_match *m, void *in, void *out)
{
int pad;
memcpy(out, in, m->matchsize);
pad = XT_ALIGN(m->matchsize) - m->matchsize;
if (pad > 0)
memset(out + m->matchsize, 0, pad);
}
static int
__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[],
void *info)
{
struct xt_match *match = expr->ops->data;
struct xt_mtchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
u16 proto = 0;
bool inv = false;
union nft_entry e = {};
int ret;
match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
if (ctx->nla[NFTA_RULE_COMPAT]) {
ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
if (ret < 0)
return ret;
}
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
nft_compat_wait_for_destructors();
return xt_check_match(&par, size, proto, inv);
}
static int
nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
}
static int
nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
struct xt_match *m = expr->ops->data;
int ret;
priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
if (!priv->info)
return -ENOMEM;
ret = __nft_match_init(ctx, expr, tb, priv->info);
if (ret)
kfree(priv->info);
return ret;
}
static void
__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
void *info)
{
struct xt_match *match = expr->ops->data;
struct module *me = match->me;
struct xt_mtdtor_param par;
par.net = ctx->net;
par.match = match;
par.matchinfo = info;
par.family = ctx->family;
if (par.match->destroy != NULL)
par.match->destroy(&par);
__nft_mt_tg_destroy(me, expr);
}
static void
nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
__nft_match_destroy(ctx, expr, nft_expr_priv(expr));
}
static void
nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
__nft_match_destroy(ctx, expr, priv->info);
kfree(priv->info);
}
static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
void *info)
{
struct xt_match *match = expr->ops->data;
if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
match->matchsize, match->usersize))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_match_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
return __nft_match_dump(skb, expr, nft_expr_priv(expr));
}
static int nft_match_large_dump(struct sk_buff *skb,
const struct nft_expr *e, bool reset)
{
struct nft_xt_match_priv *priv = nft_expr_priv(e);
return __nft_match_dump(skb, e, priv->info);
}
static int nft_match_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct xt_match *match = expr->ops->data;
unsigned int hook_mask = 0;
int ret;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
hook_mask = 1 << ops->hooknum;
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
}
return 0;
}
static int
nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, u16 family, const char *name,
int rev, int target)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
event = nfnl_msg_type(NFNL_SUBSYS_NFT_COMPAT, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int nfnl_compat_get_rcu(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const tb[])
{
u8 family = info->nfmsg->nfgen_family;
const char *name, *fmt;
struct sk_buff *skb2;
int ret = 0, target;
u32 rev;
if (tb[NFTA_COMPAT_NAME] == NULL ||
tb[NFTA_COMPAT_REV] == NULL ||
tb[NFTA_COMPAT_TYPE] == NULL)
return -EINVAL;
name = nla_data(tb[NFTA_COMPAT_NAME]);
rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
switch(family) {
case AF_INET:
fmt = "ipt_%s";
break;
case AF_INET6:
fmt = "ip6t_%s";
break;
case NFPROTO_BRIDGE:
fmt = "ebt_%s";
break;
case NFPROTO_ARP:
fmt = "arpt_%s";
break;
default:
pr_err("nft_compat: unsupported protocol %d\n", family);
return -EINVAL;
}
if (!try_module_get(THIS_MODULE))
return -EINVAL;
rcu_read_unlock();
try_then_request_module(xt_find_revision(family, name, rev, target, &ret),
fmt, name);
if (ret < 0)
goto out_put;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
goto out_put;
}
/* include the best revision for this extension in the message */
if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
NFNL_MSG_COMPAT_GET,
family, name, ret, target) <= 0) {
kfree_skb(skb2);
goto out_put;
}
ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
out_put:
rcu_read_lock();
module_put(THIS_MODULE);
return ret;
}
static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
[NFTA_COMPAT_NAME] = { .type = NLA_NUL_STRING,
.len = NFT_COMPAT_NAME_MAX-1 },
[NFTA_COMPAT_REV] = { .type = NLA_U32 },
[NFTA_COMPAT_TYPE] = { .type = NLA_U32 },
};
static const struct nfnl_callback nfnl_nft_compat_cb[NFNL_MSG_COMPAT_MAX] = {
[NFNL_MSG_COMPAT_GET] = {
.call = nfnl_compat_get_rcu,
.type = NFNL_CB_RCU,
.attr_count = NFTA_COMPAT_MAX,
.policy = nfnl_compat_policy_get
},
};
static const struct nfnetlink_subsystem nfnl_compat_subsys = {
.name = "nft-compat",
.subsys_id = NFNL_SUBSYS_NFT_COMPAT,
.cb_count = NFNL_MSG_COMPAT_MAX,
.cb = nfnl_nft_compat_cb,
};
static struct nft_expr_type nft_match_type;
static bool nft_match_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct xt_match *match = expr->ops->data;
return strcmp(match->name, "comment") == 0;
}
static const struct nft_expr_ops *
nft_match_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_expr_ops *ops;
struct xt_match *match;
unsigned int matchsize;
char *mt_name;
u32 rev, family;
int err;
if (tb[NFTA_MATCH_NAME] == NULL ||
tb[NFTA_MATCH_REV] == NULL ||
tb[NFTA_MATCH_INFO] == NULL)
return ERR_PTR(-EINVAL);
mt_name = nla_data(tb[NFTA_MATCH_NAME]);
rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
family = ctx->family;
match = xt_request_find_match(family, mt_name, rev);
if (IS_ERR(match))
return ERR_PTR(-ENOENT);
if (match->matchsize > nla_len(tb[NFTA_MATCH_INFO])) {
err = -EINVAL;
goto err;
}
ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
if (!ops) {
err = -ENOMEM;
goto err;
}
ops->type = &nft_match_type;
ops->eval = nft_match_eval;
ops->init = nft_match_init;
ops->destroy = nft_match_destroy;
ops->dump = nft_match_dump;
ops->validate = nft_match_validate;
ops->data = match;
ops->reduce = nft_match_reduce;
matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
if (matchsize > NFT_MATCH_LARGE_THRESH) {
matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
ops->eval = nft_match_large_eval;
ops->init = nft_match_large_init;
ops->destroy = nft_match_large_destroy;
ops->dump = nft_match_large_dump;
}
ops->size = matchsize;
return ops;
err:
module_put(match->me);
return ERR_PTR(err);
}
static void nft_match_release_ops(const struct nft_expr_ops *ops)
{
struct xt_match *match = ops->data;
module_put(match->me);
kfree(ops);
}
static struct nft_expr_type nft_match_type __read_mostly = {
.name = "match",
.select_ops = nft_match_select_ops,
.release_ops = nft_match_release_ops,
.policy = nft_match_policy,
.maxattr = NFTA_MATCH_MAX,
.owner = THIS_MODULE,
};
static struct nft_expr_type nft_target_type;
static const struct nft_expr_ops *
nft_target_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_expr_ops *ops;
struct xt_target *target;
char *tg_name;
u32 rev, family;
int err;
if (tb[NFTA_TARGET_NAME] == NULL ||
tb[NFTA_TARGET_REV] == NULL ||
tb[NFTA_TARGET_INFO] == NULL)
return ERR_PTR(-EINVAL);
tg_name = nla_data(tb[NFTA_TARGET_NAME]);
rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
family = ctx->family;
if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
strcmp(tg_name, "standard") == 0)
return ERR_PTR(-EINVAL);
target = xt_request_find_target(family, tg_name, rev);
if (IS_ERR(target))
return ERR_PTR(-ENOENT);
if (!target->target) {
err = -EINVAL;
goto err;
}
if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
err = -EINVAL;
goto err;
}
ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
if (!ops) {
err = -ENOMEM;
goto err;
}
ops->type = &nft_target_type;
ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
ops->init = nft_target_init;
ops->destroy = nft_target_destroy;
ops->dump = nft_target_dump;
ops->validate = nft_target_validate;
ops->data = target;
ops->reduce = NFT_REDUCE_READONLY;
if (family == NFPROTO_BRIDGE)
ops->eval = nft_target_eval_bridge;
else
ops->eval = nft_target_eval_xt;
return ops;
err:
module_put(target->me);
return ERR_PTR(err);
}
static void nft_target_release_ops(const struct nft_expr_ops *ops)
{
struct xt_target *target = ops->data;
module_put(target->me);
kfree(ops);
}
static struct nft_expr_type nft_target_type __read_mostly = {
.name = "target",
.select_ops = nft_target_select_ops,
.release_ops = nft_target_release_ops,
.policy = nft_target_policy,
.maxattr = NFTA_TARGET_MAX,
.owner = THIS_MODULE,
};
static int __init nft_compat_module_init(void)
{
int ret;
ret = nft_register_expr(&nft_match_type);
if (ret < 0)
return ret;
ret = nft_register_expr(&nft_target_type);
if (ret < 0)
goto err_match;
ret = nfnetlink_subsys_register(&nfnl_compat_subsys);
if (ret < 0) {
pr_err("nft_compat: cannot register with nfnetlink.\n");
goto err_target;
}
return ret;
err_target:
nft_unregister_expr(&nft_target_type);
err_match:
nft_unregister_expr(&nft_match_type);
return ret;
}
static void __exit nft_compat_module_exit(void)
{
nfnetlink_subsys_unregister(&nfnl_compat_subsys);
nft_unregister_expr(&nft_target_type);
nft_unregister_expr(&nft_match_type);
}
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
module_init(nft_compat_module_init);
module_exit(nft_compat_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_EXPR("match");
MODULE_ALIAS_NFT_EXPR("target");
MODULE_DESCRIPTION("x_tables over nftables support");
| linux-master | net/netfilter/nft_compat.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <net/netfilter/nf_log.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>
static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
__printf(4, 5)
void nf_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_hook_state *state,
u8 protonum,
const char *fmt, ...)
{
struct net *net = state->net;
struct va_format vaf;
va_list args;
if (net->ct.sysctl_log_invalid != protonum &&
net->ct.sysctl_log_invalid != IPPROTO_RAW)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_log_packet(net, state->pf, 0, skb, state->in, state->out,
NULL, "nf_ct_proto_%d: %pV ", protonum, &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_l4proto_log_invalid);
__printf(4, 5)
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const struct nf_hook_state *state,
const char *fmt, ...)
{
struct va_format vaf;
struct net *net;
va_list args;
net = nf_ct_net(ct);
if (likely(net->ct.sysctl_log_invalid == 0))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_l4proto_log_invalid(skb, state,
nf_ct_protonum(ct), "%pV", &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
#endif
const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto)
{
switch (l4proto) {
case IPPROTO_UDP: return &nf_conntrack_l4proto_udp;
case IPPROTO_TCP: return &nf_conntrack_l4proto_tcp;
case IPPROTO_ICMP: return &nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
case IPPROTO_DCCP: return &nf_conntrack_l4proto_dccp;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP: return &nf_conntrack_l4proto_sctp;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE: return &nf_conntrack_l4proto_udplite;
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE: return &nf_conntrack_l4proto_gre;
#endif
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6: return &nf_conntrack_l4proto_icmpv6;
#endif /* CONFIG_IPV6 */
}
return &nf_conntrack_l4proto_generic;
};
EXPORT_SYMBOL_GPL(nf_ct_l4proto_find);
static bool in_vrf_postrouting(const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (state->hook == NF_INET_POST_ROUTING &&
netif_is_l3_master(state->out))
return true;
#endif
return false;
}
unsigned int nf_confirm(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
unsigned int protoff;
struct nf_conn *ct;
bool seqadj_needed;
__be16 frag_off;
int start;
u8 pnum;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || in_vrf_postrouting(state))
return NF_ACCEPT;
help = nfct_help(ct);
seqadj_needed = test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && !nf_is_loopback_packet(skb);
if (!help && !seqadj_needed)
return nf_conntrack_confirm(skb);
/* helper->help() does not expect ICMP packets */
if (ctinfo == IP_CT_RELATED_REPLY)
return nf_conntrack_confirm(skb);
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
protoff = skb_network_offset(skb) + ip_hdrlen(skb);
break;
case NFPROTO_IPV6:
pnum = ipv6_hdr(skb)->nexthdr;
start = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off);
if (start < 0 || (frag_off & htons(~0x7)) != 0)
return nf_conntrack_confirm(skb);
protoff = start;
break;
default:
return nf_conntrack_confirm(skb);
}
if (help) {
const struct nf_conntrack_helper *helper;
int ret;
/* rcu_read_lock()ed by nf_hook */
helper = rcu_dereference(help->helper);
if (helper) {
ret = helper->help(skb,
protoff,
ct, ctinfo);
if (ret != NF_ACCEPT)
return ret;
}
}
if (seqadj_needed &&
!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
EXPORT_SYMBOL_GPL(nf_confirm);
static unsigned int ipv4_conntrack_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
return nf_conntrack_in(skb, state);
}
static unsigned int ipv4_conntrack_local(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
enum ip_conntrack_info ctinfo;
struct nf_conn *tmpl;
tmpl = nf_ct_get(skb, &ctinfo);
if (tmpl && nf_ct_is_template(tmpl)) {
/* when skipping ct, clear templates to avoid fooling
* later targets/matches
*/
skb->_nfct = 0;
nf_ct_put(tmpl);
}
return NF_ACCEPT;
}
return nf_conntrack_in(skb, state);
}
/* Connection tracking may drop packets, but never alters them, so
* make it the first hook.
*/
static const struct nf_hook_ops ipv4_conntrack_ops[] = {
{
.hook = ipv4_conntrack_in,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_local,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = nf_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
.hook = nf_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
};
/* Fast function for those who don't want to parse /proc (and I don't
* blame them).
* Reversing the socket's dst/src point of view gives us the reply
* mapping.
*/
static int
getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
const struct inet_sock *inet = inet_sk(sk);
const struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
memset(&tuple, 0, sizeof(tuple));
lock_sock(sk);
tuple.src.u3.ip = inet->inet_rcv_saddr;
tuple.src.u.tcp.port = inet->inet_sport;
tuple.dst.u3.ip = inet->inet_daddr;
tuple.dst.u.tcp.port = inet->inet_dport;
tuple.src.l3num = PF_INET;
tuple.dst.protonum = sk->sk_protocol;
release_sock(sk);
/* We only do TCP and SCTP at the moment: is there a better way? */
if (tuple.dst.protonum != IPPROTO_TCP &&
tuple.dst.protonum != IPPROTO_SCTP)
return -ENOPROTOOPT;
if ((unsigned int)*len < sizeof(struct sockaddr_in))
return -EINVAL;
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (h) {
struct sockaddr_in sin;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
sin.sin_family = AF_INET;
sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.dst.u.tcp.port;
sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.dst.u3.ip;
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
nf_ct_put(ct);
if (copy_to_user(user, &sin, sizeof(sin)) != 0)
return -EFAULT;
else
return 0;
}
return -ENOENT;
}
static struct nf_sockopt_ops so_getorigdst = {
.pf = PF_INET,
.get_optmin = SO_ORIGINAL_DST,
.get_optmax = SO_ORIGINAL_DST + 1,
.get = getorigdst,
.owner = THIS_MODULE,
};
#if IS_ENABLED(CONFIG_IPV6)
static int
ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
const struct ipv6_pinfo *inet6 = inet6_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
const struct nf_conntrack_tuple_hash *h;
struct sockaddr_in6 sin6;
struct nf_conn *ct;
__be32 flow_label;
int bound_dev_if;
lock_sock(sk);
tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
tuple.src.u.tcp.port = inet->inet_sport;
tuple.dst.u3.in6 = sk->sk_v6_daddr;
tuple.dst.u.tcp.port = inet->inet_dport;
tuple.dst.protonum = sk->sk_protocol;
bound_dev_if = sk->sk_bound_dev_if;
flow_label = inet6->flow_label;
release_sock(sk);
if (tuple.dst.protonum != IPPROTO_TCP &&
tuple.dst.protonum != IPPROTO_SCTP)
return -ENOPROTOOPT;
if (*len < 0 || (unsigned int)*len < sizeof(sin6))
return -EINVAL;
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
sin6.sin6_family = AF_INET6;
sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
memcpy(&sin6.sin6_addr,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
sizeof(sin6.sin6_addr));
nf_ct_put(ct);
sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
}
static struct nf_sockopt_ops so_getorigdst6 = {
.pf = NFPROTO_IPV6,
.get_optmin = IP6T_SO_ORIGINAL_DST,
.get_optmax = IP6T_SO_ORIGINAL_DST + 1,
.get = ipv6_getorigdst,
.owner = THIS_MODULE,
};
static unsigned int ipv6_conntrack_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
return nf_conntrack_in(skb, state);
}
static unsigned int ipv6_conntrack_local(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
return nf_conntrack_in(skb, state);
}
static const struct nf_hook_ops ipv6_conntrack_ops[] = {
{
.hook = ipv6_conntrack_in,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_CONNTRACK,
},
{
.hook = ipv6_conntrack_local,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_CONNTRACK,
},
{
.hook = nf_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_LAST,
},
{
.hook = nf_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_LAST - 1,
},
};
#endif
static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
{
u8 nfproto = (unsigned long)_nfproto;
if (nf_ct_l3num(ct) != nfproto)
return 0;
if (nf_ct_protonum(ct) == IPPROTO_TCP &&
ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
ct->proto.tcp.seen[0].td_maxwin = 0;
ct->proto.tcp.seen[1].td_maxwin = 0;
}
return 0;
}
static struct nf_ct_bridge_info *nf_ct_bridge_info;
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
bool fixup_needed = false, retry = true;
int err = 0;
retry:
mutex_lock(&nf_ct_proto_mutex);
switch (nfproto) {
case NFPROTO_IPV4:
cnet->users4++;
if (cnet->users4 > 1)
goto out_unlock;
err = nf_defrag_ipv4_enable(net);
if (err) {
cnet->users4 = 0;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
if (err)
cnet->users4 = 0;
else
fixup_needed = true;
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
cnet->users6++;
if (cnet->users6 > 1)
goto out_unlock;
err = nf_defrag_ipv6_enable(net);
if (err < 0) {
cnet->users6 = 0;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv6_conntrack_ops,
ARRAY_SIZE(ipv6_conntrack_ops));
if (err)
cnet->users6 = 0;
else
fixup_needed = true;
break;
#endif
case NFPROTO_BRIDGE:
if (!nf_ct_bridge_info) {
if (!retry) {
err = -EPROTO;
goto out_unlock;
}
mutex_unlock(&nf_ct_proto_mutex);
request_module("nf_conntrack_bridge");
retry = false;
goto retry;
}
if (!try_module_get(nf_ct_bridge_info->me)) {
err = -EPROTO;
goto out_unlock;
}
cnet->users_bridge++;
if (cnet->users_bridge > 1)
goto out_unlock;
err = nf_register_net_hooks(net, nf_ct_bridge_info->ops,
nf_ct_bridge_info->ops_size);
if (err)
cnet->users_bridge = 0;
else
fixup_needed = true;
break;
default:
err = -EPROTO;
break;
}
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
if (fixup_needed) {
struct nf_ct_iter_data iter_data = {
.net = net,
.data = (void *)(unsigned long)nfproto,
};
nf_ct_iterate_cleanup_net(nf_ct_tcp_fixup, &iter_data);
}
return err;
}
static void nf_ct_netns_do_put(struct net *net, u8 nfproto)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
mutex_lock(&nf_ct_proto_mutex);
switch (nfproto) {
case NFPROTO_IPV4:
if (cnet->users4 && (--cnet->users4 == 0)) {
nf_unregister_net_hooks(net, ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
nf_defrag_ipv4_disable(net);
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
if (cnet->users6 && (--cnet->users6 == 0)) {
nf_unregister_net_hooks(net, ipv6_conntrack_ops,
ARRAY_SIZE(ipv6_conntrack_ops));
nf_defrag_ipv6_disable(net);
}
break;
#endif
case NFPROTO_BRIDGE:
if (!nf_ct_bridge_info)
break;
if (cnet->users_bridge && (--cnet->users_bridge == 0))
nf_unregister_net_hooks(net, nf_ct_bridge_info->ops,
nf_ct_bridge_info->ops_size);
module_put(nf_ct_bridge_info->me);
break;
}
mutex_unlock(&nf_ct_proto_mutex);
}
static int nf_ct_netns_inet_get(struct net *net)
{
int err;
err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
#if IS_ENABLED(CONFIG_IPV6)
if (err < 0)
goto err1;
err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
if (err < 0)
goto err2;
return err;
err2:
nf_ct_netns_put(net, NFPROTO_IPV4);
err1:
#endif
return err;
}
int nf_ct_netns_get(struct net *net, u8 nfproto)
{
int err;
switch (nfproto) {
case NFPROTO_INET:
err = nf_ct_netns_inet_get(net);
break;
case NFPROTO_BRIDGE:
err = nf_ct_netns_do_get(net, NFPROTO_BRIDGE);
if (err < 0)
return err;
err = nf_ct_netns_inet_get(net);
if (err < 0) {
nf_ct_netns_put(net, NFPROTO_BRIDGE);
return err;
}
break;
default:
err = nf_ct_netns_do_get(net, nfproto);
break;
}
return err;
}
EXPORT_SYMBOL_GPL(nf_ct_netns_get);
void nf_ct_netns_put(struct net *net, uint8_t nfproto)
{
switch (nfproto) {
case NFPROTO_BRIDGE:
nf_ct_netns_do_put(net, NFPROTO_BRIDGE);
fallthrough;
case NFPROTO_INET:
nf_ct_netns_do_put(net, NFPROTO_IPV4);
nf_ct_netns_do_put(net, NFPROTO_IPV6);
break;
default:
nf_ct_netns_do_put(net, nfproto);
break;
}
}
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
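/*
 * Usage sketch (hedged): extensions that depend on conntrack being
 * enabled in a namespace pair these helpers, typically from their
 * checkentry/destroy callbacks, e.g.
 *
 *	ret = nf_ct_netns_get(par->net, par->family);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	nf_ct_netns_put(par->net, par->family);
 *
 * The per-netns use counters above ensure the hooks are registered on
 * first use and unregistered on the last put.
 */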
void nf_ct_bridge_register(struct nf_ct_bridge_info *info)
{
WARN_ON(nf_ct_bridge_info);
mutex_lock(&nf_ct_proto_mutex);
nf_ct_bridge_info = info;
mutex_unlock(&nf_ct_proto_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_bridge_register);
void nf_ct_bridge_unregister(struct nf_ct_bridge_info *info)
{
WARN_ON(!nf_ct_bridge_info);
mutex_lock(&nf_ct_proto_mutex);
nf_ct_bridge_info = NULL;
mutex_unlock(&nf_ct_proto_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_bridge_unregister);
int nf_conntrack_proto_init(void)
{
int ret;
ret = nf_register_sockopt(&so_getorigdst);
if (ret < 0)
return ret;
#if IS_ENABLED(CONFIG_IPV6)
ret = nf_register_sockopt(&so_getorigdst6);
if (ret < 0)
goto cleanup_sockopt;
#endif
return ret;
#if IS_ENABLED(CONFIG_IPV6)
cleanup_sockopt:
nf_unregister_sockopt(&so_getorigdst);
#endif
return ret;
}
void nf_conntrack_proto_fini(void)
{
nf_unregister_sockopt(&so_getorigdst);
#if IS_ENABLED(CONFIG_IPV6)
nf_unregister_sockopt(&so_getorigdst6);
#endif
}
void nf_conntrack_proto_pernet_init(struct net *net)
{
nf_conntrack_generic_init_net(net);
nf_conntrack_udp_init_net(net);
nf_conntrack_tcp_init_net(net);
nf_conntrack_icmp_init_net(net);
#if IS_ENABLED(CONFIG_IPV6)
nf_conntrack_icmpv6_init_net(net);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
nf_conntrack_dccp_init_net(net);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
nf_conntrack_sctp_init_net(net);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
nf_conntrack_gre_init_net(net);
#endif
}
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
MODULE_ALIAS("ip_conntrack");
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/nf_conntrack_proto.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match ESP parameters. */
/* (C) 1999-2000 Yon Uriarte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter/xt_esp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <[email protected]>");
MODULE_DESCRIPTION("Xtables: IPsec-ESP packet match");
MODULE_ALIAS("ipt_esp");
MODULE_ALIAS("ip6t_esp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
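/*
 * Minimal worked example (hypothetical values) of the "(test) ^ invert"
 * idiom above: with invert false the result is the range test itself;
 * with invert true it is flipped, which is what a negated rule such as
 * "-m esp ! --espspi 100:200" requests from userspace.
 *
 *	spi_match(100, 200, 150, false) -> true  (in range)
 *	spi_match(100, 200, 150, true)  -> false (in range, inverted)
 *	spi_match(100, 200, 300, true)  -> true  (out of range, inverted)
 */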
static bool esp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ip_esp_hdr *eh;
struct ip_esp_hdr _esp;
const struct xt_esp *espinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
eh = skb_header_pointer(skb, par->thoff, sizeof(_esp), &_esp);
if (eh == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil ESP tinygram.\n");
par->hotdrop = true;
return false;
}
return spi_match(espinfo->spis[0], espinfo->spis[1], ntohl(eh->spi),
!!(espinfo->invflags & XT_ESP_INV_SPI));
}
static int esp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_esp *espinfo = par->matchinfo;
if (espinfo->invflags & ~XT_ESP_INV_MASK) {
pr_debug("unknown flags %X\n", espinfo->invflags);
return -EINVAL;
}
return 0;
}
static struct xt_match esp_mt_reg[] __read_mostly = {
{
.name = "esp",
.family = NFPROTO_IPV4,
.checkentry = esp_mt_check,
.match = esp_mt,
.matchsize = sizeof(struct xt_esp),
.proto = IPPROTO_ESP,
.me = THIS_MODULE,
},
{
.name = "esp",
.family = NFPROTO_IPV6,
.checkentry = esp_mt_check,
.match = esp_mt,
.matchsize = sizeof(struct xt_esp),
.proto = IPPROTO_ESP,
.me = THIS_MODULE,
},
};
static int __init esp_mt_init(void)
{
return xt_register_matches(esp_mt_reg, ARRAY_SIZE(esp_mt_reg));
}
static void __exit esp_mt_exit(void)
{
xt_unregister_matches(esp_mt_reg, ARRAY_SIZE(esp_mt_reg));
}
module_init(esp_mt_init);
module_exit(esp_mt_exit);
| linux-master | net/netfilter/xt_esp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match packet length. */
/* (C) 1999-2001 James Morris <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <linux/netfilter/xt_length.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_DESCRIPTION("Xtables: Packet length (Layer3,4,5) match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_length");
MODULE_ALIAS("ip6t_length");
static bool
length_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_length_info *info = par->matchinfo;
u32 pktlen = skb_ip_totlen(skb);
return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
}
static bool
length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_length_info *info = par->matchinfo;
u32 pktlen = skb->len;
return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
}
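/*
 * Illustrative rule (userspace, hedged) exercising the match above:
 * drop packets whose total layer-3 length falls in a given range, e.g.
 * unusually small UDP datagrams:
 *
 *	iptables -A INPUT -p udp -m length --length 0:28 -j DROP
 */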
static struct xt_match length_mt_reg[] __read_mostly = {
{
.name = "length",
.family = NFPROTO_IPV4,
.match = length_mt,
.matchsize = sizeof(struct xt_length_info),
.me = THIS_MODULE,
},
{
.name = "length",
.family = NFPROTO_IPV6,
.match = length_mt6,
.matchsize = sizeof(struct xt_length_info),
.me = THIS_MODULE,
},
};
static int __init length_mt_init(void)
{
return xt_register_matches(length_mt_reg, ARRAY_SIZE(length_mt_reg));
}
static void __exit length_mt_exit(void)
{
xt_unregister_matches(length_mt_reg, ARRAY_SIZE(length_mt_reg));
}
module_init(length_mt_init);
module_exit(length_mt_exit);
| linux-master | net/netfilter/xt_length.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DCCP connection tracking protocol helper
*
* Copyright (c) 2005, 2006, 2008 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/dccp.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
/* Timeouts are based on values from RFC4340:
*
* - REQUEST:
*
* 8.1.2. Client Request
*
* A client MAY give up on its DCCP-Requests after some time
* (3 minutes, for example).
*
* - RESPOND:
*
* 8.1.3. Server Response
*
* It MAY also leave the RESPOND state for CLOSED after a timeout of
* not less than 4MSL (8 minutes);
*
* - PARTOPEN:
*
* 8.1.5. Handshake Completion
*
* If the client remains in PARTOPEN for more than 4MSL (8 minutes),
* it SHOULD reset the connection with Reset Code 2, "Aborted".
*
* - OPEN:
*
* The DCCP timestamp overflows after 11.9 hours. If the connection
* stays idle this long the sequence number won't be recognized
* as valid anymore.
*
* - CLOSEREQ/CLOSING:
*
* 8.3. Termination
*
* The retransmission timer should initially be set to go off in two
* round-trip times and should back off to not less than once every
* 64 seconds ...
*
* - TIMEWAIT:
*
* 4.3. States
*
* A server or client socket remains in this state for 2MSL (4 minutes)
* after the connection has been torn down, ...
*/
#define DCCP_MSL (2 * 60 * HZ)
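/*
 * Worked example: DCCP_MSL above is one MSL expressed in jiffies (two
 * minutes), so the RFC 4340 figures quoted above map directly onto the
 * defaults set in nf_conntrack_dccp_init_net(): TIMEWAIT uses
 * 2 * DCCP_MSL (2MSL, 4 minutes) and RESPOND/PARTOPEN use
 * 4 * DCCP_MSL (4MSL, 8 minutes).
 */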
#ifdef CONFIG_NF_CONNTRACK_PROCFS
static const char * const dccp_state_names[] = {
[CT_DCCP_NONE] = "NONE",
[CT_DCCP_REQUEST] = "REQUEST",
[CT_DCCP_RESPOND] = "RESPOND",
[CT_DCCP_PARTOPEN] = "PARTOPEN",
[CT_DCCP_OPEN] = "OPEN",
[CT_DCCP_CLOSEREQ] = "CLOSEREQ",
[CT_DCCP_CLOSING] = "CLOSING",
[CT_DCCP_TIMEWAIT] = "TIMEWAIT",
[CT_DCCP_IGNORE] = "IGNORE",
[CT_DCCP_INVALID] = "INVALID",
};
#endif
#define sNO CT_DCCP_NONE
#define sRQ CT_DCCP_REQUEST
#define sRS CT_DCCP_RESPOND
#define sPO CT_DCCP_PARTOPEN
#define sOP CT_DCCP_OPEN
#define sCR CT_DCCP_CLOSEREQ
#define sCG CT_DCCP_CLOSING
#define sTW CT_DCCP_TIMEWAIT
#define sIG CT_DCCP_IGNORE
#define sIV CT_DCCP_INVALID
/*
* DCCP state transition table
*
* The assumption is the same as for TCP tracking:
*
* We are the man in the middle. All the packets go through us but might
* get lost in transit to the destination. It is assumed that the destination
* can't receive segments we haven't seen.
*
* The following states exist:
*
* NONE: Initial state, expecting Request
* REQUEST: Request seen, waiting for Response from server
* RESPOND: Response from server seen, waiting for Ack from client
* PARTOPEN: Ack after Response seen, waiting for packet other than Response,
* Reset or Sync from server
* OPEN: Packet other than Response, Reset or Sync seen
* CLOSEREQ: CloseReq from server seen, expecting Close from client
* CLOSING: Close seen, expecting Reset
* TIMEWAIT: Reset seen
* IGNORE: Not determinable whether packet is valid
*
* Some states exist only on one side of the connection: REQUEST, RESPOND,
* PARTOPEN, CLOSEREQ. For the other side these states are equivalent to
* the one it was in before.
*
* Packets are marked as ignored (sIG) if we don't know if they're valid
* (for example a reincarnation of a connection we didn't notice is dead
* already) and the server may send back a connection closing Reset or a
* Response. They're also used for Sync/SyncAck packets, which we don't
* care about.
*/
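/*
 * Worked example (hedged), reading the table below as
 * dccp_state_table[role of sender][packet type][current state]:
 *
 *	client Request:  sNO -> sRQ	(client role table)
 *	server Response: sRQ -> sRS	(server role table)
 *	client Ack:      sRS -> sPO	(client role table)
 *	server DataAck:  sPO -> sOP	(server role table)
 *
 * i.e. a normal handshake walks NONE -> REQUEST -> RESPOND ->
 * PARTOPEN -> OPEN, alternating between the two role tables.
 */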
static const u_int8_t
dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = {
[CT_DCCP_ROLE_CLIENT] = {
[DCCP_PKT_REQUEST] = {
/*
* sNO -> sRQ Regular Request
* sRQ -> sRQ Retransmitted Request or reincarnation
* sRS -> sRS Retransmitted Request (apparently Response
* got lost after we saw it) or reincarnation
* sPO -> sIG Ignore, conntrack might be out of sync
* sOP -> sIG Ignore, conntrack might be out of sync
* sCR -> sIG Ignore, conntrack might be out of sync
* sCG -> sIG Ignore, conntrack might be out of sync
* sTW -> sRQ Reincarnation
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ,
},
[DCCP_PKT_RESPONSE] = {
/*
* sNO -> sIV Invalid
* sRQ -> sIG Ignore, might be response to ignored Request
* sRS -> sIG Ignore, might be response to ignored Request
* sPO -> sIG Ignore, might be response to ignored Request
* sOP -> sIG Ignore, might be response to ignored Request
* sCR -> sIG Ignore, might be response to ignored Request
* sCG -> sIG Ignore, might be response to ignored Request
* sTW -> sIV Invalid, reincarnation in reverse direction
* goes through sRQ
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV,
},
[DCCP_PKT_ACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
* sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN
* sOP -> sOP Regular ACK, remain in OPEN
* sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
},
[DCCP_PKT_DATA] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.)
* sOP -> sOP Regular Data packet
* sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV,
},
[DCCP_PKT_DATAACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
* sPO -> sPO Remain in PARTOPEN state
* sOP -> sOP Regular DataAck packet in OPEN state
* sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG DataAck in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
},
[DCCP_PKT_CLOSEREQ] = {
/*
* CLOSEREQ may only be sent by the server.
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV
},
[DCCP_PKT_CLOSE] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sCG Client-initiated close
* sOP -> sCG Client-initiated close
* sCR -> sCG Close in response to CloseReq (8.3.)
* sCG -> sCG Retransmit
* sTW -> sIV Late retransmit, already in TIME_WAIT
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV
},
[DCCP_PKT_RESET] = {
/*
* sNO -> sIV No connection
* sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.)
* sRS -> sTW Response received without Request
* sPO -> sTW Timeout, SHOULD send Reset (8.1.5.)
* sOP -> sTW Connection reset
* sCR -> sTW Connection reset
* sCG -> sTW Connection reset
* sTW -> sIG Ignore (don't refresh timer)
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
},
[DCCP_PKT_SYNC] = {
/*
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
[CT_DCCP_ROLE_SERVER] = {
[DCCP_PKT_REQUEST] = {
/*
* sNO -> sIV Invalid
* sRQ -> sIG Ignore, conntrack might be out of sync
* sRS -> sIG Ignore, conntrack might be out of sync
* sPO -> sIG Ignore, conntrack might be out of sync
* sOP -> sIG Ignore, conntrack might be out of sync
* sCR -> sIG Ignore, conntrack might be out of sync
* sCG -> sIG Ignore, conntrack might be out of sync
* sTW -> sRQ Reincarnation, must reverse roles
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ
},
[DCCP_PKT_RESPONSE] = {
/*
* sNO -> sIV Response without Request
* sRQ -> sRS Response to clients Request
* sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT)
* sPO -> sIG Response to an ignored Request or late retransmit
* sOP -> sIG Ignore, might be response to ignored Request
* sCR -> sIG Ignore, might be response to ignored Request
* sCG -> sIG Ignore, might be response to ignored Request
* sTW -> sIV Invalid, Request from client in sTW moves to sRQ
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV
},
[DCCP_PKT_ACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular Ack in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_DATA] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular Data packet in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_DATAACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular DataAck in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_CLOSEREQ] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.)
* sOP -> sCR CloseReq in OPEN state
* sCR -> sCR Retransmit
* sCG -> sCR Simultaneous close, client sends another Close
* sTW -> sIV Already closed
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV
},
[DCCP_PKT_CLOSE] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP -> sCG Move directly to CLOSING
* sOP -> sCG Move to CLOSING
* sCR -> sIV Close after CloseReq is invalid
* sCG -> sCG Retransmit
* sTW -> sIV Already closed
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV
},
[DCCP_PKT_RESET] = {
/*
* sNO -> sIV No connection
* sRQ -> sTW Reset in response to Request
* sRS -> sTW Timeout, SHOULD send Reset (8.1.3.)
* sPO -> sTW Timeout, SHOULD send Reset (8.1.3.)
* sOP -> sTW
* sCR -> sTW
* sCG -> sTW
* sTW -> sIG Ignore (don't refresh timer)
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW, sTW */
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sTW, sIG
},
[DCCP_PKT_SYNC] = {
/*
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
};
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct dccp_hdr *dh,
const struct nf_hook_state *hook_state)
{
struct net *net = nf_ct_net(ct);
struct nf_dccp_net *dn;
const char *msg;
u_int8_t state;
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = nf_dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "not picking up existing connection ";
goto out_invalid;
}
break;
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
nf_ct_l4proto_log_invalid(skb, ct, hook_state, "%s", msg);
return false;
}
static u64 dccp_ack_seq(const struct dccp_hdr *dh)
{
const struct dccp_hdr_ack_bits *dhack;
dhack = (void *)dh + __dccp_basic_hdr_len(dh);
return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
ntohl(dhack->dccph_ack_nr_low);
}
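/*
 * Sketch of the 48-bit layout assembled above (hypothetical values):
 * the acknowledgment number travels as a 16-bit high part and a 32-bit
 * low part, so high = 0x0001 and low = 0x00000002 combine to
 * 0x000100000002.
 */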
static bool dccp_error(const struct dccp_hdr *dh,
struct sk_buff *skb, unsigned int dataoff,
const struct nf_hook_state *state)
{
static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
1 << DCCP_PKT_RESPONSE |
1 << DCCP_PKT_CLOSEREQ |
1 << DCCP_PKT_CLOSE |
1 << DCCP_PKT_RESET |
1 << DCCP_PKT_SYNC |
1 << DCCP_PKT_SYNCACK;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
u8 type;
BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
msg = "nf_ct_dccp: truncated/malformed packet ";
goto out_invalid;
}
cscov = dccp_len;
if (dh->dccph_cscov) {
cscov = (dh->dccph_cscov - 1) * 4;
if (cscov > dccp_len) {
msg = "nf_ct_dccp: bad checksum coverage ";
goto out_invalid;
}
}
if (state->hook == NF_INET_PRE_ROUTING &&
state->net->ct.sysctl_checksum &&
nf_checksum_partial(skb, state->hook, dataoff, cscov,
IPPROTO_DCCP, state->pf)) {
msg = "nf_ct_dccp: bad checksum ";
goto out_invalid;
}
type = dh->dccph_type;
if (type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
if (test_bit(type, &require_seq48) && !dh->dccph_x) {
msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
goto out_invalid;
}
return false;
out_invalid:
nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
return true;
}
struct nf_conntrack_dccp_buf {
struct dccp_hdr dh; /* generic header part */
struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
union { /* depends on header type */
struct dccp_hdr_ack_bits ack;
struct dccp_hdr_request req;
struct dccp_hdr_response response;
struct dccp_hdr_reset rst;
} u;
};
static struct dccp_hdr *
dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
struct nf_conntrack_dccp_buf *buf)
{
unsigned int hdrlen = __dccp_hdr_len(dh);
if (hdrlen > sizeof(*buf))
return NULL;
return skb_header_pointer(skb, offset, hdrlen, buf);
}
int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct nf_conntrack_dccp_buf _dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
unsigned int *timeouts;
struct dccp_hdr *dh;
dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
if (!dh)
return NF_DROP;
if (dccp_error(dh, skb, dataoff, state))
return -NF_ACCEPT;
/* pull again, including possible 48-bit sequence numbers and subtype header */
dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
if (!dh)
return NF_DROP;
type = dh->dccph_type;
if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
return -NF_ACCEPT;
if (type == DCCP_PKT_RESET &&
!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* Tear down connection immediately if only reply is a RESET */
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
spin_lock_bh(&ct->lock);
role = ct->proto.dccp.role[dir];
old_state = ct->proto.dccp.state;
new_state = dccp_state_table[role][type][old_state];
switch (new_state) {
case CT_DCCP_REQUEST:
if (old_state == CT_DCCP_TIMEWAIT &&
role == CT_DCCP_ROLE_SERVER) {
/* Reincarnation in the reverse direction: reopen and
* reverse client/server roles. */
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
}
break;
case CT_DCCP_RESPOND:
if (old_state == CT_DCCP_REQUEST)
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
break;
case CT_DCCP_PARTOPEN:
if (old_state == CT_DCCP_RESPOND &&
type == DCCP_PKT_ACK &&
dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
set_bit(IPS_ASSURED_BIT, &ct->status);
break;
case CT_DCCP_IGNORE:
/*
* Connection tracking might be out of sync, so we ignore
* packets that might establish a new connection and resync
* if the server responds with a valid Response.
*/
if (ct->proto.dccp.last_dir == !dir &&
ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
type == DCCP_PKT_RESPONSE) {
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
new_state = CT_DCCP_RESPOND;
break;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid packet");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid state transition");
return -NF_ACCEPT;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
ct->proto.dccp.state = new_state;
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
static bool dccp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.dccp.state) {
case CT_DCCP_CLOSEREQ:
case CT_DCCP_CLOSING:
case CT_DCCP_TIMEWAIT:
return true;
default:
break;
}
return false;
}
#ifdef CONFIG_NF_CONNTRACK_PROCFS
static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state))
goto nla_put_failure;
if (destroy)
goto skip_state;
if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
cpu_to_be64(ct->proto.dccp.handshake_seq),
CTA_PROTOINFO_DCCP_PAD))
goto nla_put_failure;
skip_state:
nla_nest_end(skb, nest_parms);
spin_unlock_bh(&ct->lock);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
[CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 },
[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
[CTA_PROTOINFO_DCCP_PAD] = { .type = NLA_UNSPEC },
};
#define DCCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \
NLA_ALIGN(NLA_HDRLEN + 0))
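/*
 * Worked size example (hedged): with NLA_HDRLEN == 4 the two u8
 * attributes each round up to NLA_ALIGN(4 + 1) == 8 bytes, the u64 to
 * NLA_ALIGN(4 + 8) == 12 and the empty pad attribute to 4, i.e. 32
 * bytes reserved per dumped connection.
 */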
static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
int err;
if (!attr)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_DCCP_MAX, attr,
dccp_nla_policy, NULL);
if (err < 0)
return err;
if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
!tb[CTA_PROTOINFO_DCCP_ROLE] ||
nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||
nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {
return -EINVAL;
}
spin_lock_bh(&ct->lock);
ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
} else {
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT;
}
if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {
ct->proto.dccp.handshake_seq =
be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
}
spin_unlock_bh(&ct->lock);
return 0;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_dccp_net *dn = nf_dccp_pernet(net);
unsigned int *timeouts = data;
int i;
if (!timeouts)
timeouts = dn->dccp_timeout;
/* set default DCCP timeouts. */
for (i = 0; i < CT_DCCP_MAX; i++)
timeouts[i] = dn->dccp_timeout[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i = CTA_TIMEOUT_DCCP_UNSPEC + 1; i < CTA_TIMEOUT_DCCP_MAX + 1; i++) {
if (tb[i]) {
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
return 0;
}
static int
dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
int i;
for (i = CTA_TIMEOUT_DCCP_UNSPEC + 1; i < CTA_TIMEOUT_DCCP_MAX + 1; i++) {
if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
[CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_dccp_init_net(struct net *net)
{
struct nf_dccp_net *dn = nf_dccp_pernet(net);
/* default values */
dn->dccp_loose = 1;
dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
/* timeouts[0] is unused; make it the same as REQUEST (DCCP's
 * analogue of TCP's SYN_SENT) so ->timeouts[0] contains the 'new'
 * timeout, like udp or icmp.
 */
dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.l4proto = IPPROTO_DCCP,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = dccp_print_conntrack,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_size = DCCP_NLATTR_SIZE,
.to_nlattr = dccp_to_nlattr,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_DCCP_MAX,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_dccp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2006 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFLOG.h>
#include <net/netfilter/nf_log.h>
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet logging to netlink using NFLOG");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_NFLOG");
MODULE_ALIAS("ip6t_NFLOG");
static unsigned int
nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
struct net *net = xt_net(par);
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
nf_log_packet(net, xt_family(par), xt_hooknum(par), skb, xt_in(par),
xt_out(par), &li, "%s", info->prefix);
return XT_CONTINUE;
}
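/*
 * Illustrative rule (userspace, hedged) that exercises this target:
 * copy new inbound connections to netlink group 32 with a prefix, to
 * be picked up by an NFLOG listener such as ulogd:
 *
 *	iptables -A INPUT -m conntrack --ctstate NEW \
 *		-j NFLOG --nflog-group 32 --nflog-prefix "new-conn"
 */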
static int nflog_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
int ret;
if (info->flags & ~XT_NFLOG_MASK)
return -EINVAL;
if (info->prefix[sizeof(info->prefix) - 1] != '\0')
return -EINVAL;
ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
if (ret != 0 && !par->nft_compat) {
request_module("%s", "nfnetlink_log");
ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
}
return ret;
}
static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_logger_put(par->family, NF_LOG_TYPE_ULOG);
}
static struct xt_target nflog_tg_reg __read_mostly = {
.name = "NFLOG",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = nflog_tg_check,
.destroy = nflog_tg_destroy,
.target = nflog_tg,
.targetsize = sizeof(struct xt_nflog_info),
.me = THIS_MODULE,
};
static int __init nflog_tg_init(void)
{
return xt_register_target(&nflog_tg_reg);
}
static void __exit nflog_tg_exit(void)
{
xt_unregister_target(&nflog_tg_reg);
}
module_init(nflog_tg_init);
module_exit(nflog_tg_exit);
MODULE_SOFTDEP("pre: nfnetlink_log");
| linux-master | net/netfilter/xt_NFLOG.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* "TEE" target extension for Xtables
* Copyright © Sebastian Claßen, 2007
* Jan Engelhardt, 2007-2010
*
* based on ipt_ROUTE.c from Cédric de Launois
* <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/route.h>
#include <linux/netfilter/x_tables.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/route.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#include <net/netfilter/ipv6/nf_dup_ipv6.h>
#include <linux/netfilter/xt_TEE.h>
struct xt_tee_priv {
struct list_head list;
struct xt_tee_tginfo *tginfo;
int oif;
};
static unsigned int tee_net_id __read_mostly;
static const union nf_inet_addr tee_zero_address;
struct tee_net {
struct list_head priv_list;
/* lock protects the priv_list */
struct mutex lock;
};
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
int oif = info->priv ? info->priv->oif : 0;
nf_dup_ipv4(xt_net(par), skb, xt_hooknum(par), &info->gw.in, oif);
return XT_CONTINUE;
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
int oif = info->priv ? info->priv->oif : 0;
nf_dup_ipv6(xt_net(par), skb, xt_hooknum(par), &info->gw.in6, oif);
return XT_CONTINUE;
}
#endif
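/*
 * Illustrative rule (userspace, hedged): mirror all inbound traffic to
 * a monitoring host via the IPv4 target above:
 *
 *	iptables -t mangle -A PREROUTING -j TEE --gateway 192.0.2.9
 */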
static int tee_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
struct tee_net *tn = net_generic(net, tee_net_id);
struct xt_tee_priv *priv;
mutex_lock(&tn->lock);
list_for_each_entry(priv, &tn->priv_list, list) {
switch (event) {
case NETDEV_REGISTER:
if (!strcmp(dev->name, priv->tginfo->oif))
priv->oif = dev->ifindex;
break;
case NETDEV_UNREGISTER:
if (dev->ifindex == priv->oif)
priv->oif = -1;
break;
case NETDEV_CHANGENAME:
if (!strcmp(dev->name, priv->tginfo->oif))
priv->oif = dev->ifindex;
else if (dev->ifindex == priv->oif)
priv->oif = -1;
break;
}
}
mutex_unlock(&tn->lock);
return NOTIFY_DONE;
}
static int tee_tg_check(const struct xt_tgchk_param *par)
{
struct tee_net *tn = net_generic(par->net, tee_net_id);
struct xt_tee_tginfo *info = par->targinfo;
struct xt_tee_priv *priv;
/* 0.0.0.0 and :: not allowed */
if (memcmp(&info->gw, &tee_zero_address,
sizeof(tee_zero_address)) == 0)
return -EINVAL;
if (info->oif[0]) {
struct net_device *dev;
if (info->oif[sizeof(info->oif)-1] != '\0')
return -EINVAL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
priv->tginfo = info;
priv->oif = -1;
info->priv = priv;
dev = dev_get_by_name(par->net, info->oif);
if (dev) {
priv->oif = dev->ifindex;
dev_put(dev);
}
mutex_lock(&tn->lock);
list_add(&priv->list, &tn->priv_list);
mutex_unlock(&tn->lock);
} else
info->priv = NULL;
static_key_slow_inc(&xt_tee_enabled);
return 0;
}
static void tee_tg_destroy(const struct xt_tgdtor_param *par)
{
struct tee_net *tn = net_generic(par->net, tee_net_id);
struct xt_tee_tginfo *info = par->targinfo;
if (info->priv) {
mutex_lock(&tn->lock);
list_del(&info->priv->list);
mutex_unlock(&tn->lock);
kfree(info->priv);
}
static_key_slow_dec(&xt_tee_enabled);
}
static struct xt_target tee_tg_reg[] __read_mostly = {
{
.name = "TEE",
.revision = 1,
.family = NFPROTO_IPV4,
.target = tee_tg4,
.targetsize = sizeof(struct xt_tee_tginfo),
.usersize = offsetof(struct xt_tee_tginfo, priv),
.checkentry = tee_tg_check,
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "TEE",
.revision = 1,
.family = NFPROTO_IPV6,
.target = tee_tg6,
.targetsize = sizeof(struct xt_tee_tginfo),
.usersize = offsetof(struct xt_tee_tginfo, priv),
.checkentry = tee_tg_check,
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
#endif
};
static int __net_init tee_net_init(struct net *net)
{
struct tee_net *tn = net_generic(net, tee_net_id);
INIT_LIST_HEAD(&tn->priv_list);
mutex_init(&tn->lock);
return 0;
}
static struct pernet_operations tee_net_ops = {
.init = tee_net_init,
.id = &tee_net_id,
.size = sizeof(struct tee_net),
};
static struct notifier_block tee_netdev_notifier = {
.notifier_call = tee_netdev_event,
};
static int __init tee_tg_init(void)
{
int ret;
ret = register_pernet_subsys(&tee_net_ops);
if (ret < 0)
return ret;
ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
if (ret < 0)
goto cleanup_subsys;
ret = register_netdevice_notifier(&tee_netdev_notifier);
if (ret < 0)
goto unregister_targets;
return 0;
unregister_targets:
xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
cleanup_subsys:
unregister_pernet_subsys(&tee_net_ops);
return ret;
}
static void __exit tee_tg_exit(void)
{
unregister_netdevice_notifier(&tee_netdev_notifier);
xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
unregister_pernet_subsys(&tee_net_ops);
}
module_init(tee_tg_init);
module_exit(tee_tg_exit);
MODULE_AUTHOR("Sebastian Claßen <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: Reroute packet copy");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TEE");
MODULE_ALIAS("ip6t_TEE");
| linux-master | net/netfilter/xt_TEE.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a module which is used for logging packets to userspace via
* nfnetlink.
*
* (C) 2005 by Harald Welte <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*
* Based on the old ipv4-only ipt_ULOG.c:
* (C) 2000-2004 by Harald Welte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#define NFULNL_COPY_DISABLED 0xff
#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
/* max packet size is limited by 16-bit struct nfattr nfa_len field */
#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN)
#define PRINTR(x, args...) do { if (net_ratelimit()) \
printk(x, ## args); } while (0)
struct nfulnl_instance {
struct hlist_node hlist; /* global list of instances */
spinlock_t lock;
refcount_t use; /* use count */
unsigned int qlen; /* number of nlmsgs in skb */
struct sk_buff *skb; /* pre-allocated skb */
struct timer_list timer;
struct net *net;
netns_tracker ns_tracker;
struct user_namespace *peer_user_ns; /* User namespace of the peer process */
u32 peer_portid; /* PORTID of the peer process */
/* configurable parameters */
unsigned int flushtimeout; /* timeout until queue flush */
unsigned int nlbufsiz; /* netlink buffer allocation size */
unsigned int qthreshold; /* threshold of the queue */
u_int32_t copy_range;
u_int32_t seq; /* instance-local sequential counter */
u_int16_t group_num; /* number of this queue */
u_int16_t flags;
u_int8_t copy_mode;
struct rcu_head rcu;
};
#define INSTANCE_BUCKETS 16
static unsigned int nfnl_log_net_id __read_mostly;
struct nfnl_log_net {
spinlock_t instances_lock;
struct hlist_head instance_table[INSTANCE_BUCKETS];
atomic_t global_seq;
};
static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
{
return net_generic(net, nfnl_log_net_id);
}
static inline u_int8_t instance_hashfn(u_int16_t group_num)
{
return ((group_num & 0xff) % INSTANCE_BUCKETS);
}
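/*
 * Worked example (hypothetical group number): group_num 0x1234 keeps
 * only the low byte, 0x34 == 52, and 52 % 16 selects bucket 4 of
 * instance_table.
 */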
static struct nfulnl_instance *
__instance_lookup(const struct nfnl_log_net *log, u16 group_num)
{
const struct hlist_head *head;
struct nfulnl_instance *inst;
head = &log->instance_table[instance_hashfn(group_num)];
hlist_for_each_entry_rcu(inst, head, hlist) {
if (inst->group_num == group_num)
return inst;
}
return NULL;
}
static inline void
instance_get(struct nfulnl_instance *inst)
{
refcount_inc(&inst->use);
}
static struct nfulnl_instance *
instance_lookup_get_rcu(const struct nfnl_log_net *log, u16 group_num)
{
struct nfulnl_instance *inst;
inst = __instance_lookup(log, group_num);
if (inst && !refcount_inc_not_zero(&inst->use))
inst = NULL;
return inst;
}
static struct nfulnl_instance *
instance_lookup_get(const struct nfnl_log_net *log, u16 group_num)
{
struct nfulnl_instance *inst;
rcu_read_lock();
inst = instance_lookup_get_rcu(log, group_num);
rcu_read_unlock();
return inst;
}
static void nfulnl_instance_free_rcu(struct rcu_head *head)
{
struct nfulnl_instance *inst =
container_of(head, struct nfulnl_instance, rcu);
put_net_track(inst->net, &inst->ns_tracker);
kfree(inst);
module_put(THIS_MODULE);
}
static void
instance_put(struct nfulnl_instance *inst)
{
if (inst && refcount_dec_and_test(&inst->use))
call_rcu(&inst->rcu, nfulnl_instance_free_rcu);
}
static void nfulnl_timer(struct timer_list *t);
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
u32 portid, struct user_namespace *user_ns)
{
struct nfulnl_instance *inst;
struct nfnl_log_net *log = nfnl_log_pernet(net);
int err;
spin_lock_bh(&log->instances_lock);
if (__instance_lookup(log, group_num)) {
err = -EEXIST;
goto out_unlock;
}
inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
if (!inst) {
err = -ENOMEM;
goto out_unlock;
}
if (!try_module_get(THIS_MODULE)) {
kfree(inst);
err = -EAGAIN;
goto out_unlock;
}
INIT_HLIST_NODE(&inst->hlist);
spin_lock_init(&inst->lock);
/* needs to be two, since we _put() after creation */
refcount_set(&inst->use, 2);
timer_setup(&inst->timer, nfulnl_timer, 0);
inst->net = get_net_track(net, &inst->ns_tracker, GFP_ATOMIC);
inst->peer_user_ns = user_ns;
inst->peer_portid = portid;
inst->group_num = group_num;
inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
inst->copy_mode = NFULNL_COPY_PACKET;
inst->copy_range = NFULNL_COPY_RANGE_MAX;
hlist_add_head_rcu(&inst->hlist,
&log->instance_table[instance_hashfn(group_num)]);
spin_unlock_bh(&log->instances_lock);
return inst;
out_unlock:
spin_unlock_bh(&log->instances_lock);
return ERR_PTR(err);
}
static void __nfulnl_flush(struct nfulnl_instance *inst);
/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
/* first pull it out of the global list */
hlist_del_rcu(&inst->hlist);
/* then flush all pending packets from skb */
spin_lock(&inst->lock);
/* lockless readers won't be able to use us */
inst->copy_mode = NFULNL_COPY_DISABLED;
if (inst->skb)
__nfulnl_flush(inst);
spin_unlock(&inst->lock);
/* and finally put the refcount */
instance_put(inst);
}
static inline void
instance_destroy(struct nfnl_log_net *log,
struct nfulnl_instance *inst)
{
spin_lock_bh(&log->instances_lock);
__instance_destroy(inst);
spin_unlock_bh(&log->instances_lock);
}
static int
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
unsigned int range)
{
int status = 0;
spin_lock_bh(&inst->lock);
switch (mode) {
case NFULNL_COPY_NONE:
case NFULNL_COPY_META:
inst->copy_mode = mode;
inst->copy_range = 0;
break;
case NFULNL_COPY_PACKET:
inst->copy_mode = mode;
if (range == 0)
range = NFULNL_COPY_RANGE_MAX;
inst->copy_range = min_t(unsigned int,
range, NFULNL_COPY_RANGE_MAX);
break;
default:
status = -EINVAL;
break;
}
spin_unlock_bh(&inst->lock);
return status;
}
static int
nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
{
int status;
spin_lock_bh(&inst->lock);
if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
status = -ERANGE;
else if (nlbufsiz > 131072)
status = -ERANGE;
else {
inst->nlbufsiz = nlbufsiz;
status = 0;
}
spin_unlock_bh(&inst->lock);
return status;
}
static void
nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
{
spin_lock_bh(&inst->lock);
inst->flushtimeout = timeout;
spin_unlock_bh(&inst->lock);
}
static void
nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
{
spin_lock_bh(&inst->lock);
inst->qthreshold = qthresh;
spin_unlock_bh(&inst->lock);
}
static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
spin_lock_bh(&inst->lock);
inst->flags = flags;
spin_unlock_bh(&inst->lock);
return 0;
}
static struct sk_buff *
nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
unsigned int pkt_size)
{
struct sk_buff *skb;
unsigned int n;
/* alloc skb which should be big enough for a whole multipart
* message. WARNING: has to be <= 128k due to slab restrictions */
n = max(inst_size, pkt_size);
skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
if (n > pkt_size) {
/* try to allocate only as much as we need for current
* packet */
skb = alloc_skb(pkt_size, GFP_ATOMIC);
}
}
return skb;
}
static void
__nfulnl_send(struct nfulnl_instance *inst)
{
if (inst->qlen > 1) {
struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
NLMSG_DONE,
sizeof(struct nfgenmsg),
0);
if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
inst->skb->len, skb_tailroom(inst->skb))) {
kfree_skb(inst->skb);
goto out;
}
}
nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
out:
inst->qlen = 0;
inst->skb = NULL;
}
static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
/* timer holds a reference */
if (del_timer(&inst->timer))
instance_put(inst);
if (inst->skb)
__nfulnl_send(inst);
}
static void
nfulnl_timer(struct timer_list *t)
{
struct nfulnl_instance *inst = from_timer(inst, t, timer);
spin_lock_bh(&inst->lock);
if (inst->skb)
__nfulnl_send(inst);
spin_unlock_bh(&inst->lock);
instance_put(inst);
}
static u32 nfulnl_get_bridge_size(const struct sk_buff *skb)
{
u32 size = 0;
if (!skb_mac_header_was_set(skb))
return 0;
if (skb_vlan_tag_present(skb)) {
size += nla_total_size(0); /* nested */
size += nla_total_size(sizeof(u16)); /* id */
size += nla_total_size(sizeof(u16)); /* tag */
}
if (skb->network_header > skb->mac_header)
size += nla_total_size(skb->network_header - skb->mac_header);
return size;
}
static int nfulnl_put_bridge(struct nfulnl_instance *inst, const struct sk_buff *skb)
{
if (!skb_mac_header_was_set(skb))
return 0;
if (skb_vlan_tag_present(skb)) {
struct nlattr *nest;
nest = nla_nest_start(inst->skb, NFULA_VLAN);
if (!nest)
goto nla_put_failure;
if (nla_put_be16(inst->skb, NFULA_VLAN_TCI, htons(skb->vlan_tci)) ||
nla_put_be16(inst->skb, NFULA_VLAN_PROTO, skb->vlan_proto))
goto nla_put_failure;
nla_nest_end(inst->skb, nest);
}
if (skb->mac_header < skb->network_header) {
int len = (int)(skb->network_header - skb->mac_header);
if (nla_put(inst->skb, NFULA_L2HDR, len, skb_mac_header(skb)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -1;
}
/* This is an inline function, we don't really care about a long
* list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
struct nfulnl_instance *inst,
const struct sk_buff *skb,
unsigned int data_len,
u_int8_t pf,
unsigned int hooknum,
const struct net_device *indev,
const struct net_device *outdev,
const char *prefix, unsigned int plen,
const struct nfnl_ct_hook *nfnl_ct,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct nfulnl_msg_packet_hdr pmsg;
struct nlmsghdr *nlh;
sk_buff_data_t old_tail = inst->skb->tail;
struct sock *sk;
const unsigned char *hwhdrp;
nlh = nfnl_msg_put(inst->skb, 0, 0,
nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
0, pf, NFNETLINK_V0, htons(inst->group_num));
if (!nlh)
return -1;
memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
goto nla_put_failure;
if (prefix &&
nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
goto nla_put_failure;
if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
#else
if (pf == PF_BRIDGE) {
/* Case 1: outdev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by nf_hook_thresh or
* nf_log_packet.
*/
nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
struct net_device *physindev;
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
physindev = nf_bridge_get_physindev(skb);
if (physindev &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
htonl(physindev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
#else
if (pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by nf_hook_thresh or
* nf_log_packet.
*/
nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
struct net_device *physoutdev;
/* Case 2: indev is a bridge group, we need to look
* for physical device (when called from ipv4) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
physoutdev = nf_bridge_get_physoutdev(skb);
if (physoutdev &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(physoutdev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (skb->mark &&
nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
goto nla_put_failure;
if (indev && skb->dev &&
skb_mac_header_was_set(skb) &&
skb_mac_header_len(skb) != 0) {
struct nfulnl_msg_packet_hw phw;
int len;
memset(&phw, 0, sizeof(phw));
len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
goto nla_put_failure;
}
}
if (indev && skb_mac_header_was_set(skb)) {
if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)))
goto nla_put_failure;
hwhdrp = skb_mac_header(skb);
if (skb->dev->type == ARPHRD_SIT)
hwhdrp -= ETH_HLEN;
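/* ARPHRD_SIT is special-cased: ETH_HLEN bytes before the mac
* header are included as well, and the skb->head check below
* keeps the adjusted pointer from walking off the front of the
* buffer. */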
if (hwhdrp >= skb->head &&
nla_put(inst->skb, NFULA_HWHEADER,
skb->dev->hard_header_len, hwhdrp))
goto nla_put_failure;
}
if (hooknum <= NF_INET_FORWARD) {
struct timespec64 kts = ktime_to_timespec64(skb_tstamp_cond(skb, true));
struct nfulnl_msg_packet_timestamp ts;
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
goto nla_put_failure;
}
/* UID */
sk = skb->sk;
if (sk && sk_fullsock(sk)) {
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
struct file *file = sk->sk_socket->file;
const struct cred *cred = file->f_cred;
struct user_namespace *user_ns = inst->peer_user_ns;
__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
read_unlock_bh(&sk->sk_callback_lock);
if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
nla_put_be32(inst->skb, NFULA_GID, gid))
goto nla_put_failure;
} else
read_unlock_bh(&sk->sk_callback_lock);
}
/* local sequence number */
if ((inst->flags & NFULNL_CFG_F_SEQ) &&
nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
goto nla_put_failure;
/* global sequence number */
if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
htonl(atomic_inc_return(&log->global_seq))))
goto nla_put_failure;
if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
NFULA_CT, NFULA_CT_INFO) < 0)
goto nla_put_failure;
if ((pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE) &&
nfulnl_put_bridge(inst, skb) < 0)
goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
int size = nla_attr_size(data_len);
if (skb_tailroom(inst->skb) < nla_total_size(data_len))
goto nla_put_failure;
nla = skb_put(inst->skb, nla_total_size(data_len));
nla->nla_type = NFULA_PAYLOAD;
nla->nla_len = size;
if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
BUG();
}
nlh->nlmsg_len = inst->skb->tail - old_tail;
return 0;
nla_put_failure:
PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
return -1;
}
static const struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_ULOG,
.u = {
.ulog = {
.copy_len = 0xffff,
.group = 0,
.qthreshold = 1,
},
},
};
/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *li_user,
const char *prefix)
{
size_t size;
unsigned int data_len;
struct nfulnl_instance *inst;
const struct nf_loginfo *li;
unsigned int qthreshold;
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
struct nf_conn *ct = NULL;
enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
else
li = &default_loginfo;
inst = instance_lookup_get_rcu(log, li->u.ulog.group);
if (!inst)
return;
if (prefix)
plen = strlen(prefix) + 1;
/* FIXME: do we want to make the size calculation conditional based on
* what is actually present? way more branches and checks, but more
* memory efficient... */
size = nlmsg_total_size(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(u_int32_t)) /* uid */
+ nla_total_size(sizeof(u_int32_t)) /* gid */
+ nla_total_size(plen) /* prefix */
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
+ nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */
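/* nla_total_size() accounts for the 4-byte attribute header plus
* padding to a 4-byte boundary, e.g. nla_total_size(4) == 8 and
* nla_total_size(6) == 12, so the sum above is a worst-case upper
* bound on the attributes emitted by __build_packet_message(). */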
if (in && skb_mac_header_was_set(skb)) {
size += nla_total_size(skb->dev->hard_header_len)
+ nla_total_size(sizeof(u_int16_t)) /* hwtype */
+ nla_total_size(sizeof(u_int16_t)); /* hwlen */
}
spin_lock_bh(&inst->lock);
if (inst->flags & NFULNL_CFG_F_SEQ)
size += nla_total_size(sizeof(u_int32_t));
if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
size += nla_total_size(sizeof(u_int32_t));
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
nfnl_ct = rcu_dereference(nfnl_ct_hook);
if (nfnl_ct != NULL) {
ct = nf_ct_get(skb, &ctinfo);
if (ct != NULL)
size += nfnl_ct->build_size(ct);
}
}
#endif
if (pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE)
size += nfulnl_get_bridge_size(skb);
qthreshold = inst->qthreshold;
/* per-rule qthreshold overrides per-instance */
if (li->u.ulog.qthreshold)
if (qthreshold > li->u.ulog.qthreshold)
qthreshold = li->u.ulog.qthreshold;
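/* Example: with an instance threshold of 100 and a rule threshold
* of 10, the smaller value wins and the batch is flushed to
* userspace after 10 queued messages. */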
switch (inst->copy_mode) {
case NFULNL_COPY_META:
case NFULNL_COPY_NONE:
data_len = 0;
break;
case NFULNL_COPY_PACKET:
data_len = inst->copy_range;
if ((li->u.ulog.flags & NF_LOG_F_COPY_LEN) &&
(li->u.ulog.copy_len < data_len))
data_len = li->u.ulog.copy_len;
if (data_len > skb->len)
data_len = skb->len;
size += nla_total_size(data_len);
break;
case NFULNL_COPY_DISABLED:
default:
goto unlock_and_release;
}
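/* Batching model: log messages accumulate in inst->skb until
* qthreshold messages are queued, the skb runs out of tailroom,
* or the flush timer fires; each of these events pushes the whole
* batch to the peer in a single netlink send. */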
if (inst->skb && size > skb_tailroom(inst->skb)) {
/* not enough room left in the skb for this message;
* flush the pending batch to userspace. */
__nfulnl_flush(inst);
}
if (!inst->skb) {
inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
inst->nlbufsiz, size);
if (!inst->skb)
goto alloc_failure;
}
inst->qlen++;
__build_packet_message(log, inst, skb, data_len, pf,
hooknum, in, out, prefix, plen,
nfnl_ct, ct, ctinfo);
if (inst->qlen >= qthreshold)
__nfulnl_flush(inst);
/* timer_pending() is always called with inst->lock held,
* so there is no chance of a race here */
else if (!timer_pending(&inst->timer)) {
instance_get(inst);
inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
add_timer(&inst->timer);
}
unlock_and_release:
spin_unlock_bh(&inst->lock);
instance_put(inst);
return;
alloc_failure:
/* FIXME: statistics */
goto unlock_and_release;
}
static int
nfulnl_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct nfnl_log_net *log = nfnl_log_pernet(n->net);
if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
int i;
/* destroy all instances for this portid */
spin_lock_bh(&log->instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct hlist_node *t2;
struct nfulnl_instance *inst;
struct hlist_head *head = &log->instance_table[i];
hlist_for_each_entry_safe(inst, t2, head, hlist) {
if (n->portid == inst->peer_portid)
__instance_destroy(inst);
}
}
spin_unlock_bh(&log->instances_lock);
}
return NOTIFY_DONE;
}
static struct notifier_block nfulnl_rtnl_notifier = {
.notifier_call = nfulnl_rcv_nl_event,
};
static int nfulnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfula[])
{
return -ENOTSUPP;
}
static struct nf_logger nfulnl_logger __read_mostly = {
.name = "nfnetlink_log",
.type = NF_LOG_TYPE_ULOG,
.logfn = nfulnl_log_packet,
.me = THIS_MODULE,
};
static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
[NFULA_CFG_CMD] = { .len = sizeof(struct nfulnl_msg_config_cmd) },
[NFULA_CFG_MODE] = { .len = sizeof(struct nfulnl_msg_config_mode) },
[NFULA_CFG_TIMEOUT] = { .type = NLA_U32 },
[NFULA_CFG_QTHRESH] = { .type = NLA_U32 },
[NFULA_CFG_NLBUFSIZ] = { .type = NLA_U32 },
[NFULA_CFG_FLAGS] = { .type = NLA_U16 },
};
static int nfulnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfula[])
{
struct nfnl_log_net *log = nfnl_log_pernet(info->net);
u_int16_t group_num = ntohs(info->nfmsg->res_id);
struct nfulnl_msg_config_cmd *cmd = NULL;
struct nfulnl_instance *inst;
u16 flags = 0;
int ret = 0;
if (nfula[NFULA_CFG_CMD]) {
u_int8_t pf = info->nfmsg->nfgen_family;
cmd = nla_data(nfula[NFULA_CFG_CMD]);
/* Commands without queue context */
switch (cmd->command) {
case NFULNL_CFG_CMD_PF_BIND:
return nf_log_bind_pf(info->net, pf, &nfulnl_logger);
case NFULNL_CFG_CMD_PF_UNBIND:
nf_log_unbind_pf(info->net, pf);
return 0;
}
}
inst = instance_lookup_get(log, group_num);
if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
ret = -EPERM;
goto out_put;
}
/* Check whether we support these flags in the first place;
* required dependencies must be present too so that atomicity
* is not broken.
*/
if (nfula[NFULA_CFG_FLAGS]) {
flags = ntohs(nla_get_be16(nfula[NFULA_CFG_FLAGS]));
if ((flags & NFULNL_CFG_F_CONNTRACK) &&
!rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
nfnl_unlock(NFNL_SUBSYS_ULOG);
request_module("ip_conntrack_netlink");
nfnl_lock(NFNL_SUBSYS_ULOG);
if (rcu_access_pointer(nfnl_ct_hook)) {
ret = -EAGAIN;
goto out_put;
}
#endif
ret = -EOPNOTSUPP;
goto out_put;
}
}
if (cmd != NULL) {
switch (cmd->command) {
case NFULNL_CFG_CMD_BIND:
if (inst) {
ret = -EBUSY;
goto out_put;
}
inst = instance_create(info->net, group_num,
NETLINK_CB(skb).portid,
sk_user_ns(NETLINK_CB(skb).sk));
if (IS_ERR(inst)) {
ret = PTR_ERR(inst);
goto out;
}
break;
case NFULNL_CFG_CMD_UNBIND:
if (!inst) {
ret = -ENODEV;
goto out;
}
instance_destroy(log, inst);
goto out_put;
default:
ret = -ENOTSUPP;
goto out_put;
}
} else if (!inst) {
ret = -ENODEV;
goto out;
}
if (nfula[NFULA_CFG_MODE]) {
struct nfulnl_msg_config_mode *params =
nla_data(nfula[NFULA_CFG_MODE]);
nfulnl_set_mode(inst, params->copy_mode,
ntohl(params->copy_range));
}
if (nfula[NFULA_CFG_TIMEOUT]) {
__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);
nfulnl_set_timeout(inst, ntohl(timeout));
}
if (nfula[NFULA_CFG_NLBUFSIZ]) {
__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);
nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
}
if (nfula[NFULA_CFG_QTHRESH]) {
__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);
nfulnl_set_qthresh(inst, ntohl(qthresh));
}
if (nfula[NFULA_CFG_FLAGS])
nfulnl_set_flags(inst, flags);
out_put:
instance_put(inst);
out:
return ret;
}
static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
[NFULNL_MSG_PACKET] = {
.call = nfulnl_recv_unsupp,
.type = NFNL_CB_MUTEX,
.attr_count = NFULA_MAX,
},
[NFULNL_MSG_CONFIG] = {
.call = nfulnl_recv_config,
.type = NFNL_CB_MUTEX,
.attr_count = NFULA_CFG_MAX,
.policy = nfula_cfg_policy
},
};
static const struct nfnetlink_subsystem nfulnl_subsys = {
.name = "log",
.subsys_id = NFNL_SUBSYS_ULOG,
.cb_count = NFULNL_MSG_MAX,
.cb = nfulnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
struct seq_net_private p;
unsigned int bucket;
};
static struct hlist_node *get_first(struct net *net, struct iter_state *st)
{
struct nfnl_log_net *log;
if (!st)
return NULL;
log = nfnl_log_pernet(net);
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
struct hlist_head *head = &log->instance_table[st->bucket];
if (!hlist_empty(head))
return rcu_dereference(hlist_first_rcu(head));
}
return NULL;
}
static struct hlist_node *get_next(struct net *net, struct iter_state *st,
struct hlist_node *h)
{
h = rcu_dereference(hlist_next_rcu(h));
while (!h) {
struct nfnl_log_net *log;
struct hlist_head *head;
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
log = nfnl_log_pernet(net);
head = &log->instance_table[st->bucket];
h = rcu_dereference(hlist_first_rcu(head));
}
return h;
}
static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
loff_t pos)
{
struct hlist_node *head;
head = get_first(net, st);
if (head)
while (pos && (head = get_next(net, st, head)))
pos--;
return pos ? NULL : head;
}
static void *seq_start(struct seq_file *s, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return get_idx(seq_file_net(s), s->private, *pos);
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return get_next(seq_file_net(s), s->private, v);
}
static void seq_stop(struct seq_file *s, void *v)
__releases(rcu)
{
rcu_read_unlock();
}
static int seq_show(struct seq_file *s, void *v)
{
const struct nfulnl_instance *inst = v;
seq_printf(s, "%5u %6u %5u %1u %5u %6u %2u\n",
inst->group_num,
inst->peer_portid, inst->qlen,
inst->copy_mode, inst->copy_range,
inst->flushtimeout, refcount_read(&inst->use));
return 0;
}
static const struct seq_operations nful_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = seq_show,
};
#endif /* CONFIG_PROC_FS */
static int __net_init nfnl_log_net_init(struct net *net)
{
unsigned int i;
struct nfnl_log_net *log = nfnl_log_pernet(net);
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc;
kuid_t root_uid;
kgid_t root_gid;
#endif
for (i = 0; i < INSTANCE_BUCKETS; i++)
INIT_HLIST_HEAD(&log->instance_table[i]);
spin_lock_init(&log->instances_lock);
#ifdef CONFIG_PROC_FS
proc = proc_create_net("nfnetlink_log", 0440, net->nf.proc_netfilter,
&nful_seq_ops, sizeof(struct iter_state));
if (!proc)
return -ENOMEM;
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
#endif
return 0;
}
static void __net_exit nfnl_log_net_exit(struct net *net)
{
struct nfnl_log_net *log = nfnl_log_pernet(net);
unsigned int i;
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
nf_log_unset(net, &nfulnl_logger);
for (i = 0; i < INSTANCE_BUCKETS; i++)
WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
}
static struct pernet_operations nfnl_log_net_ops = {
.init = nfnl_log_net_init,
.exit = nfnl_log_net_exit,
.id = &nfnl_log_net_id,
.size = sizeof(struct nfnl_log_net),
};
static int __init nfnetlink_log_init(void)
{
int status;
status = register_pernet_subsys(&nfnl_log_net_ops);
if (status < 0) {
pr_err("failed to register pernet ops\n");
goto out;
}
netlink_register_notifier(&nfulnl_rtnl_notifier);
status = nfnetlink_subsys_register(&nfulnl_subsys);
if (status < 0) {
pr_err("failed to create netlink socket\n");
goto cleanup_netlink_notifier;
}
status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
if (status < 0) {
pr_err("failed to register logger\n");
goto cleanup_subsys;
}
return status;
cleanup_subsys:
nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
netlink_unregister_notifier(&nfulnl_rtnl_notifier);
unregister_pernet_subsys(&nfnl_log_net_ops);
out:
return status;
}
static void __exit nfnetlink_log_fini(void)
{
nfnetlink_subsys_unregister(&nfulnl_subsys);
netlink_unregister_notifier(&nfulnl_rtnl_notifier);
unregister_pernet_subsys(&nfnl_log_net_ops);
nf_log_unregister(&nfulnl_logger);
}
MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
MODULE_ALIAS_NF_LOGGER(5, 1); /* NFPROTO_NETDEV */
module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);
| linux-master | net/netfilter/nfnetlink_log.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Pablo M. Bermudo Garay <[email protected]>
*
* This code is based on net/netfilter/nft_fib_inet.c, written by
* Florian Westphal <[email protected]>.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/ipv6.h>
#include <net/netfilter/nft_fib.h>
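/* At netdev (ingress) hooks the packet has not been classified by the
* network stack yet, so the fib expression dispatches on the ethertype
* in skb->protocol and reuses the IPv4/IPv6 fib eval backends. */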
static void nft_fib_netdev_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_fib *priv = nft_expr_priv(expr);
switch (ntohs(pkt->skb->protocol)) {
case ETH_P_IP:
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
return nft_fib4_eval(expr, regs, pkt);
case NFT_FIB_RESULT_ADDRTYPE:
return nft_fib4_eval_type(expr, regs, pkt);
}
break;
case ETH_P_IPV6:
if (!ipv6_mod_enabled())
break;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
return nft_fib6_eval(expr, regs, pkt);
case NFT_FIB_RESULT_ADDRTYPE:
return nft_fib6_eval_type(expr, regs, pkt);
}
break;
}
regs->verdict.code = NFT_BREAK;
}
static struct nft_expr_type nft_fib_netdev_type;
static const struct nft_expr_ops nft_fib_netdev_ops = {
.type = &nft_fib_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fib)),
.eval = nft_fib_netdev_eval,
.init = nft_fib_init,
.dump = nft_fib_dump,
.validate = nft_fib_validate,
.reduce = nft_fib_reduce,
};
static struct nft_expr_type nft_fib_netdev_type __read_mostly = {
.family = NFPROTO_NETDEV,
.name = "fib",
.ops = &nft_fib_netdev_ops,
.policy = nft_fib_policy,
.maxattr = NFTA_FIB_MAX,
.owner = THIS_MODULE,
};
static int __init nft_fib_netdev_module_init(void)
{
return nft_register_expr(&nft_fib_netdev_type);
}
static void __exit nft_fib_netdev_module_exit(void)
{
nft_unregister_expr(&nft_fib_netdev_type);
}
module_init(nft_fib_netdev_module_init);
module_exit(nft_fib_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo M. Bermudo Garay <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
MODULE_DESCRIPTION("nftables netdev fib lookups support");
| linux-master | net/netfilter/nft_fib_netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* iptables module for DCCP protocol header matching
*
* (C) 2005 by Harald Welte <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip.h>
#include <linux/dccp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_dccp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: DCCP protocol packet match");
MODULE_ALIAS("ipt_dccp");
MODULE_ALIAS("ip6t_dccp");
#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
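/* DCCHECK(cond, option, flag, invflag) is true when the option is not
* selected at all, or when 'cond' matches the (possibly inverted)
* sense requested for it. Example: with XT_DCCP_TYPE set in flag and
* clear in invflag, only packets whose type bit is set in the
* typemask match. */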
static unsigned char *dccp_optbuf;
static DEFINE_SPINLOCK(dccp_buflock);
static inline bool
dccp_find_option(u_int8_t option,
const struct sk_buff *skb,
unsigned int protoff,
const struct dccp_hdr *dh,
bool *hotdrop)
{
/* dccph_doff is 8 bits, i.e. the whole header is at most 255 * 4 bytes */
const unsigned char *op;
unsigned int optoff = __dccp_hdr_len(dh);
unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh);
unsigned int i;
if (dh->dccph_doff * 4 < __dccp_hdr_len(dh))
goto invalid;
if (!optlen)
return false;
spin_lock_bh(&dccp_buflock);
op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf);
if (op == NULL) {
/* If we don't have the whole header, drop packet. */
goto partial;
}
for (i = 0; i < optlen; ) {
if (op[i] == option) {
spin_unlock_bh(&dccp_buflock);
return true;
}
if (op[i] < 2)
i++;
else
i += op[i+1]?:1;
}
spin_unlock_bh(&dccp_buflock);
return false;
partial:
spin_unlock_bh(&dccp_buflock);
invalid:
*hotdrop = true;
return false;
}
static inline bool
match_types(const struct dccp_hdr *dh, u_int16_t typemask)
{
return typemask & (1 << dh->dccph_type);
}
static inline bool
match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
const struct dccp_hdr *dh, bool *hotdrop)
{
return dccp_find_option(option, skb, protoff, dh, hotdrop);
}
static bool
dccp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dccp_info *info = par->matchinfo;
const struct dccp_hdr *dh;
struct dccp_hdr _dh;
if (par->fragoff != 0)
return false;
dh = skb_header_pointer(skb, par->thoff, sizeof(_dh), &_dh);
if (dh == NULL) {
par->hotdrop = true;
return false;
}
return DCCHECK(ntohs(dh->dccph_sport) >= info->spts[0]
&& ntohs(dh->dccph_sport) <= info->spts[1],
XT_DCCP_SRC_PORTS, info->flags, info->invflags)
&& DCCHECK(ntohs(dh->dccph_dport) >= info->dpts[0]
&& ntohs(dh->dccph_dport) <= info->dpts[1],
XT_DCCP_DEST_PORTS, info->flags, info->invflags)
&& DCCHECK(match_types(dh, info->typemask),
XT_DCCP_TYPE, info->flags, info->invflags)
&& DCCHECK(match_option(info->option, skb, par->thoff, dh,
&par->hotdrop),
XT_DCCP_OPTION, info->flags, info->invflags);
}
static int dccp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dccp_info *info = par->matchinfo;
if (info->flags & ~XT_DCCP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~XT_DCCP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~info->flags)
return -EINVAL;
return 0;
}
static struct xt_match dccp_mt_reg[] __read_mostly = {
{
.name = "dccp",
.family = NFPROTO_IPV4,
.checkentry = dccp_mt_check,
.match = dccp_mt,
.matchsize = sizeof(struct xt_dccp_info),
.proto = IPPROTO_DCCP,
.me = THIS_MODULE,
},
{
.name = "dccp",
.family = NFPROTO_IPV6,
.checkentry = dccp_mt_check,
.match = dccp_mt,
.matchsize = sizeof(struct xt_dccp_info),
.proto = IPPROTO_DCCP,
.me = THIS_MODULE,
},
};
static int __init dccp_mt_init(void)
{
int ret;
/* doff is 8 bits, so the maximum option size is (4*256). Don't put
* this in BSS since DaveM is worried about locked TLB's for kernel
* BSS. */
dccp_optbuf = kmalloc(256 * 4, GFP_KERNEL);
if (!dccp_optbuf)
return -ENOMEM;
ret = xt_register_matches(dccp_mt_reg, ARRAY_SIZE(dccp_mt_reg));
if (ret)
goto out_kfree;
return ret;
out_kfree:
kfree(dccp_optbuf);
return ret;
}
static void __exit dccp_mt_exit(void)
{
xt_unregister_matches(dccp_mt_reg, ARRAY_SIZE(dccp_mt_reg));
kfree(dccp_optbuf);
}
module_init(dccp_mt_init);
module_exit(dccp_mt_exit);
| linux-master | net/netfilter/xt_dccp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match running CPU */
/*
* Can be used to distribute connections across several daemons if
* RPS (Receive Packet Steering) is enabled or the NIC is multiqueue
* capable, with each RX queue IRQ affined to one CPU (1:1 mapping)
*/
/* (C) 2010 Eric Dumazet
*/
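/* Illustrative use, assuming the userspace 'cpu' match extension is
* available: steer port-80 flows handled by CPU 0 to a dedicated
* userspace queue:
*
*   iptables -t mangle -A PREROUTING -p tcp --dport 80 \
*     -m cpu --cpu 0 -j NFQUEUE --queue-num 0
*/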
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/xt_cpu.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Dumazet <[email protected]>");
MODULE_DESCRIPTION("Xtables: CPU match");
MODULE_ALIAS("ipt_cpu");
MODULE_ALIAS("ip6t_cpu");
static int cpu_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_cpu_info *info = par->matchinfo;
if (info->invert & ~1)
return -EINVAL;
return 0;
}
static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cpu_info *info = par->matchinfo;
return (info->cpu == smp_processor_id()) ^ info->invert;
}
static struct xt_match cpu_mt_reg __read_mostly = {
.name = "cpu",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = cpu_mt_check,
.match = cpu_mt,
.matchsize = sizeof(struct xt_cpu_info),
.me = THIS_MODULE,
};
static int __init cpu_mt_init(void)
{
return xt_register_match(&cpu_mt_reg);
}
static void __exit cpu_mt_exit(void)
{
xt_unregister_match(&cpu_mt_reg);
}
module_init(cpu_mt_init);
module_exit(cpu_mt_exit);
| linux-master | net/netfilter/xt_cpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* H.323 connection tracking helper
*
* Copyright (c) 2006 Jing Min Zhao <[email protected]>
* Copyright (c) 2006-2012 Patrick McHardy <[email protected]>
*
* Based on the 'brute force' H.323 connection tracking module by
* Jozsef Kadlecsik <[email protected]>
*
* For more information, please see http://nath323.sourceforge.net/
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>
#define H323_MAX_SIZE 65535
/* Parameters */
static unsigned int default_rrq_ttl __read_mostly = 300;
module_param(default_rrq_ttl, uint, 0600);
MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ");
static int gkrouted_only __read_mostly = 1;
module_param(gkrouted_only, int, 0600);
MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
static bool callforward_filter __read_mostly = true;
module_param(callforward_filter, bool, 0600);
MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
"if both endpoints are on different sides "
"(determined by routing information)");
const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfct_h323_nat_hook);
static DEFINE_SPINLOCK(nf_h323_lock);
static char *h323_buffer;
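/* A single scratch buffer is shared by all H.323 helpers (as is the
* static decode state in the help functions below); nf_h323_lock
* serializes every helper invocation, trading parallelism for
* simplicity. */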
static struct nf_conntrack_helper nf_conntrack_helper_h245;
static struct nf_conntrack_helper nf_conntrack_helper_q931[];
static struct nf_conntrack_helper nf_conntrack_helper_ras[];
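/* Q.931/H.245 PDUs ride on TCP inside TPKT framing (RFC 1006): a
* 4-byte header of 0x03, 0x00 and a 16-bit big-endian total length,
* followed by the payload. Example: 03 00 00 24 introduces a TPKT
* carrying 0x24 - 4 = 32 bytes of signalling data. */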
static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned char **data, int *datalen, int *dataoff)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
const struct tcphdr *th;
struct tcphdr _tcph;
int tcpdatalen;
int tcpdataoff;
unsigned char *tpkt;
int tpktlen;
int tpktoff;
/* Get TCP header */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return 0;
/* Get TCP data offset */
tcpdataoff = protoff + th->doff * 4;
/* Get TCP data length */
tcpdatalen = skb->len - tcpdataoff;
if (tcpdatalen <= 0) /* No TCP data */
goto clear_out;
if (tcpdatalen > H323_MAX_SIZE)
tcpdatalen = H323_MAX_SIZE;
if (*data == NULL) { /* first TPKT */
/* Get first TPKT pointer */
tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
h323_buffer);
if (!tpkt)
goto clear_out;
/* Validate TPKT identifier */
if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
/* Netmeeting sends TPKT header and data separately */
if (info->tpkt_len[dir] > 0) {
pr_debug("nf_ct_h323: previous packet "
"indicated separate TPKT data of %hu "
"bytes\n", info->tpkt_len[dir]);
if (info->tpkt_len[dir] <= tcpdatalen) {
/* Yes, there was a TPKT header
* received */
*data = tpkt;
*datalen = info->tpkt_len[dir];
*dataoff = 0;
goto out;
}
/* Fragmented TPKT */
pr_debug("nf_ct_h323: fragmented TPKT\n");
goto clear_out;
}
/* It is not even a TPKT */
return 0;
}
tpktoff = 0;
} else { /* Next TPKT */
tpktoff = *dataoff + *datalen;
tcpdatalen -= tpktoff;
if (tcpdatalen <= 4) /* No more TPKT */
goto clear_out;
tpkt = *data + *datalen;
/* Validate TPKT identifier */
if (tpkt[0] != 0x03 || tpkt[1] != 0)
goto clear_out;
}
/* Validate TPKT length */
tpktlen = tpkt[2] * 256 + tpkt[3];
if (tpktlen < 4)
goto clear_out;
if (tpktlen > tcpdatalen) {
if (tcpdatalen == 4) { /* Separate TPKT header */
/* Netmeeting sends TPKT header and data separately */
pr_debug("nf_ct_h323: separate TPKT header indicates "
"there will be TPKT data of %d bytes\n",
tpktlen - 4);
info->tpkt_len[dir] = tpktlen - 4;
return 0;
}
pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n");
goto clear_out;
}
/* This is the encapsulated data */
*data = tpkt + 4;
*datalen = tpktlen - 4;
*dataoff = tpktoff + 4;
out:
/* Clear TPKT length */
info->tpkt_len[dir] = 0;
return 1;
clear_out:
info->tpkt_len[dir] = 0;
return 0;
}
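/* The ASN.1 decoder leaves addresses as byte offsets into the decoded
* PDU: at the stored offset sit 4 (IPv4) or 16 (IPv6) address octets
* immediately followed by a 2-byte port, which is what the memcpy()s
* below pick apart. */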
static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
H245_TransportAddress *taddr,
union nf_inet_addr *addr, __be16 *port)
{
const unsigned char *p;
int len;
if (taddr->choice != eH245_TransportAddress_unicastAddress)
return 0;
switch (taddr->unicastAddress.choice) {
case eUnicastAddress_iPAddress:
if (nf_ct_l3num(ct) != AF_INET)
return 0;
p = data + taddr->unicastAddress.iPAddress.network;
len = 4;
break;
case eUnicastAddress_iP6Address:
if (nf_ct_l3num(ct) != AF_INET6)
return 0;
p = data + taddr->unicastAddress.iP6Address.network;
len = 16;
break;
default:
return 0;
}
memcpy(addr, p, len);
memset((void *)addr + len, 0, sizeof(*addr) - len);
memcpy(port, p + len, sizeof(__be16));
return 1;
}
static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr)
{
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
__be16 rtp_port, rtcp_port;
union nf_inet_addr addr;
struct nf_conntrack_expect *rtp_exp;
struct nf_conntrack_expect *rtcp_exp;
/* Read RTP or RTCP address */
if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
port == 0)
return 0;
/* RTP port is even */
rtp_port = port & ~htons(1);
rtcp_port = port | htons(1);
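/* Per the RTP convention the data stream uses the even port and
* RTCP the next odd one, e.g. a signalled port of 5005 yields
* RTP on 5004 and RTCP on 5005. */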
/* Create expect for RTP */
if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &rtp_port);
/* Create expect for RTCP */
if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
nf_ct_expect_put(rtp_exp);
return -1;
}
nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &rtcp_port);
nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
ret = nathook->nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
taddr, port, rtp_port, rtp_exp, rtcp_exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(rtp_exp, 0) == 0) {
if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
pr_debug("nf_ct_h323: expect RTP ");
nf_ct_dump_tuple(&rtp_exp->tuple);
pr_debug("nf_ct_h323: expect RTCP ");
nf_ct_dump_tuple(&rtcp_exp->tuple);
} else {
nf_ct_unexpect_related(rtp_exp);
ret = -1;
}
} else
ret = -1;
}
nf_ct_expect_put(rtp_exp);
nf_ct_expect_put(rtcp_exp);
return ret;
}
static int expect_t120(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr)
{
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
/* Read T.120 address */
if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
port == 0)
return 0;
/* Create expect for T.120 connections */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */
nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
ret = nathook->nat_t120(skb, ct, ctinfo, protoff, data,
dataoff, taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_h323: expect T.120 ");
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
nf_ct_expect_put(exp);
return ret;
}
static int process_h245_channel(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
H2250LogicalChannelParameters *channel)
{
int ret;
if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
/* RTP */
ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
&channel->mediaChannel);
if (ret < 0)
return -1;
}
if (channel->options &
eH2250LogicalChannelParameters_mediaControlChannel) {
/* RTCP */
ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
&channel->mediaControlChannel);
if (ret < 0)
return -1;
}
return 0;
}
static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
OpenLogicalChannel *olc)
{
int ret;
pr_debug("nf_ct_h323: OpenLogicalChannel\n");
if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
{
ret = process_h245_channel(skb, ct, ctinfo,
protoff, data, dataoff,
&olc->
forwardLogicalChannelParameters.
multiplexParameters.
h2250LogicalChannelParameters);
if (ret < 0)
return -1;
}
if ((olc->options &
eOpenLogicalChannel_reverseLogicalChannelParameters) &&
(olc->reverseLogicalChannelParameters.options &
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters)
&& (olc->reverseLogicalChannelParameters.multiplexParameters.
choice ==
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
{
ret =
process_h245_channel(skb, ct, ctinfo,
protoff, data, dataoff,
&olc->
reverseLogicalChannelParameters.
multiplexParameters.
h2250LogicalChannelParameters);
if (ret < 0)
return -1;
}
if ((olc->options & eOpenLogicalChannel_separateStack) &&
olc->forwardLogicalChannelParameters.dataType.choice ==
eDataType_data &&
olc->forwardLogicalChannelParameters.dataType.data.application.
choice == eDataApplicationCapability_application_t120 &&
olc->forwardLogicalChannelParameters.dataType.data.application.
t120.choice == eDataProtocolCapability_separateLANStack &&
olc->separateStack.networkAddress.choice ==
eNetworkAccessParameters_networkAddress_localAreaAddress) {
ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
&olc->separateStack.networkAddress.
localAreaAddress);
if (ret < 0)
return -1;
}
return 0;
}
static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
OpenLogicalChannelAck *olca)
{
H2250LogicalChannelAckParameters *ack;
int ret;
pr_debug("nf_ct_h323: OpenLogicalChannelAck\n");
if ((olca->options &
eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
(olca->reverseLogicalChannelParameters.options &
eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters)
&& (olca->reverseLogicalChannelParameters.multiplexParameters.
choice ==
eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
{
ret = process_h245_channel(skb, ct, ctinfo,
protoff, data, dataoff,
&olca->
reverseLogicalChannelParameters.
multiplexParameters.
h2250LogicalChannelParameters);
if (ret < 0)
return -1;
}
if ((olca->options &
eOpenLogicalChannelAck_forwardMultiplexAckParameters) &&
(olca->forwardMultiplexAckParameters.choice ==
eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters))
{
ack = &olca->forwardMultiplexAckParameters.
h2250LogicalChannelAckParameters;
if (ack->options &
eH2250LogicalChannelAckParameters_mediaChannel) {
/* RTP */
ret = expect_rtp_rtcp(skb, ct, ctinfo,
protoff, data, dataoff,
&ack->mediaChannel);
if (ret < 0)
return -1;
}
if (ack->options &
eH2250LogicalChannelAckParameters_mediaControlChannel) {
/* RTCP */
ret = expect_rtp_rtcp(skb, ct, ctinfo,
protoff, data, dataoff,
&ack->mediaControlChannel);
if (ret < 0)
return -1;
}
}
if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
olca->separateStack.networkAddress.choice ==
eNetworkAccessParameters_networkAddress_localAreaAddress) {
ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
&olca->separateStack.networkAddress.
localAreaAddress);
if (ret < 0)
return -1;
}
return 0;
}
static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
MultimediaSystemControlMessage *mscm)
{
switch (mscm->choice) {
case eMultimediaSystemControlMessage_request:
if (mscm->request.choice ==
eRequestMessage_openLogicalChannel) {
return process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&mscm->request.openLogicalChannel);
}
pr_debug("nf_ct_h323: H.245 Request %d\n",
mscm->request.choice);
break;
case eMultimediaSystemControlMessage_response:
if (mscm->response.choice ==
eResponseMessage_openLogicalChannelAck) {
return process_olca(skb, ct, ctinfo,
protoff, data, dataoff,
&mscm->response.
openLogicalChannelAck);
}
pr_debug("nf_ct_h323: H.245 Response %d\n",
mscm->response.choice);
break;
default:
pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice);
break;
}
return 0;
}
static int h245_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
static MultimediaSystemControlMessage mscm;
unsigned char *data = NULL;
int datalen;
int dataoff;
int ret;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
/* Process each TPKT */
while (get_tpkt_data(skb, protoff, ct, ctinfo,
&data, &datalen, &dataoff)) {
pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode H.245 signal */
ret = DecodeMultimediaSystemControlMessage(data, datalen,
&mscm);
if (ret < 0) {
pr_debug("nf_ct_h245: decoding error: %s\n",
ret == H323_ERROR_BOUND ?
"out of bound" : "out of range");
/* We don't drop the packet on a decoding error */
break;
}
/* Process H.245 signal */
if (process_h245(skb, ct, ctinfo, protoff,
&data, dataoff, &mscm) < 0)
goto drop;
}
spin_unlock_bh(&nf_h323_lock);
return NF_ACCEPT;
drop:
spin_unlock_bh(&nf_h323_lock);
nf_ct_helper_log(skb, ct, "cannot process H.245 message");
return NF_DROP;
}
static const struct nf_conntrack_expect_policy h245_exp_policy = {
.max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
.timeout = 240,
};
static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
.name = "H.245",
.me = THIS_MODULE,
.tuple.src.l3num = AF_UNSPEC,
.tuple.dst.protonum = IPPROTO_UDP,
.help = h245_help,
.expect_policy = &h245_exp_policy,
};
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr,
union nf_inet_addr *addr, __be16 *port)
{
const unsigned char *p;
int len;
switch (taddr->choice) {
case eTransportAddress_ipAddress:
if (nf_ct_l3num(ct) != AF_INET)
return 0;
p = data + taddr->ipAddress.ip;
len = 4;
break;
case eTransportAddress_ip6Address:
if (nf_ct_l3num(ct) != AF_INET6)
return 0;
p = data + taddr->ip6Address.ip;
len = 16;
break;
default:
return 0;
}
memcpy(addr, p, len);
memset((void *)addr + len, 0, sizeof(*addr) - len);
memcpy(port, p + len, sizeof(__be16));
return 1;
}
EXPORT_SYMBOL_GPL(get_h225_addr);
static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
TransportAddress *taddr)
{
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
/* Read h245Address */
if (!get_h225_addr(ct, *data, taddr, &addr, &port) ||
memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
port == 0)
return 0;
/* Create expect for h245 connection */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
exp->helper = &nf_conntrack_helper_h245;
nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* NAT needed */
ret = nathook->nat_h245(skb, ct, ctinfo, protoff, data,
dataoff, taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect H.245 ");
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
nf_ct_expect_put(exp);
return ret;
}
/* If the calling party is on the same side as the forward-to party,
* we don't need to track the second call
*/
static int callforward_do_filter(struct net *net,
const union nf_inet_addr *src,
const union nf_inet_addr *dst,
u_int8_t family)
{
int ret = 0;
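/* Look up routes towards both endpoints; if they resolve to the
* same nexthop on the same output device, the caller and the
* forward-to party sit on the same side of this box and the
* second call leg never crosses it. */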
switch (family) {
case AF_INET: {
struct flowi4 fl1, fl2;
struct rtable *rt1, *rt2;
memset(&fl1, 0, sizeof(fl1));
fl1.daddr = src->ip;
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->ip;
if (!nf_ip_route(net, (struct dst_entry **)&rt1,
flowi4_to_flowi(&fl1), false)) {
if (!nf_ip_route(net, (struct dst_entry **)&rt2,
flowi4_to_flowi(&fl2), false)) {
if (rt_nexthop(rt1, fl1.daddr) ==
rt_nexthop(rt2, fl2.daddr) &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
dst_release(&rt2->dst);
}
dst_release(&rt1->dst);
}
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct rt6_info *rt1, *rt2;
struct flowi6 fl1, fl2;
memset(&fl1, 0, sizeof(fl1));
fl1.daddr = src->in6;
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->in6;
if (!nf_ip6_route(net, (struct dst_entry **)&rt1,
flowi6_to_flowi(&fl1), false)) {
if (!nf_ip6_route(net, (struct dst_entry **)&rt2,
flowi6_to_flowi(&fl2), false)) {
if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
rt6_nexthop(rt2, &fl2.daddr)) &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
dst_release(&rt2->dst);
}
dst_release(&rt1->dst);
}
break;
}
#endif
}
return ret;
}
static int expect_callforwarding(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr)
{
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
struct net *net = nf_ct_net(ct);
/* Read alternativeAddress */
if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0)
return 0;
/* If the calling party is on the same side as the forward-to party,
* we don't need to track the second call
*/
if (callforward_filter &&
callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3,
nf_ct_l3num(ct))) {
pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
return 0;
}
/* Create expect for the second call leg */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->helper = nf_conntrack_helper_q931;
nathook = rcu_dereference(nfct_h323_nat_hook);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
nathook &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* Need NAT */
ret = nathook->nat_callforwarding(skb, ct, ctinfo,
protoff, data, dataoff,
taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect Call Forwarding ");
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
nf_ct_expect_put(exp);
return ret;
}
static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
Setup_UUIE *setup)
{
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
int i;
__be16 port;
union nf_inet_addr addr;
pr_debug("nf_ct_q931: Setup\n");
if (setup->options & eSetup_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&setup->h245Address);
if (ret < 0)
return -1;
}
nathook = rcu_dereference(nfct_h323_nat_hook);
if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->destCallSignalAddress,
&addr, &port) &&
memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
&addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
ret = nathook->set_h225_addr(skb, protoff, data, dataoff,
&setup->destCallSignalAddress,
&ct->tuplehash[!dir].tuple.src.u3,
ct->tuplehash[!dir].tuple.src.u.tcp.port);
if (ret < 0)
return -1;
}
if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
&addr, &port) &&
memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
&addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
ret = nathook->set_h225_addr(skb, protoff, data, dataoff,
&setup->sourceCallSignalAddress,
&ct->tuplehash[!dir].tuple.dst.u3,
ct->tuplehash[!dir].tuple.dst.u.tcp.port);
if (ret < 0)
return -1;
}
if (setup->options & eSetup_UUIE_fastStart) {
for (i = 0; i < setup->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&setup->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_callproceeding(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
CallProceeding_UUIE *callproc)
{
int ret;
int i;
pr_debug("nf_ct_q931: CallProceeding\n");
if (callproc->options & eCallProceeding_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&callproc->h245Address);
if (ret < 0)
return -1;
}
if (callproc->options & eCallProceeding_UUIE_fastStart) {
for (i = 0; i < callproc->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&callproc->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
Connect_UUIE *connect)
{
int ret;
int i;
pr_debug("nf_ct_q931: Connect\n");
if (connect->options & eConnect_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&connect->h245Address);
if (ret < 0)
return -1;
}
if (connect->options & eConnect_UUIE_fastStart) {
for (i = 0; i < connect->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&connect->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
Alerting_UUIE *alert)
{
int ret;
int i;
pr_debug("nf_ct_q931: Alerting\n");
if (alert->options & eAlerting_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&alert->h245Address);
if (ret < 0)
return -1;
}
if (alert->options & eAlerting_UUIE_fastStart) {
for (i = 0; i < alert->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&alert->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
Facility_UUIE *facility)
{
int ret;
int i;
pr_debug("nf_ct_q931: Facility\n");
if (facility->reason.choice == eFacilityReason_callForwarded) {
if (facility->options & eFacility_UUIE_alternativeAddress)
return expect_callforwarding(skb, ct, ctinfo,
protoff, data, dataoff,
&facility->
alternativeAddress);
return 0;
}
if (facility->options & eFacility_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&facility->h245Address);
if (ret < 0)
return -1;
}
if (facility->options & eFacility_UUIE_fastStart) {
for (i = 0; i < facility->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&facility->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
Progress_UUIE *progress)
{
int ret;
int i;
pr_debug("nf_ct_q931: Progress\n");
if (progress->options & eProgress_UUIE_h245Address) {
ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
&progress->h245Address);
if (ret < 0)
return -1;
}
if (progress->options & eProgress_UUIE_fastStart) {
for (i = 0; i < progress->fastStart.count; i++) {
ret = process_olc(skb, ct, ctinfo,
protoff, data, dataoff,
&progress->fastStart.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
Q931 *q931)
{
H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
int i;
int ret = 0;
switch (pdu->h323_message_body.choice) {
case eH323_UU_PDU_h323_message_body_setup:
ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff,
&pdu->h323_message_body.setup);
break;
case eH323_UU_PDU_h323_message_body_callProceeding:
ret = process_callproceeding(skb, ct, ctinfo,
protoff, data, dataoff,
&pdu->h323_message_body.
callProceeding);
break;
case eH323_UU_PDU_h323_message_body_connect:
ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff,
&pdu->h323_message_body.connect);
break;
case eH323_UU_PDU_h323_message_body_alerting:
ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff,
&pdu->h323_message_body.alerting);
break;
case eH323_UU_PDU_h323_message_body_facility:
ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff,
&pdu->h323_message_body.facility);
break;
case eH323_UU_PDU_h323_message_body_progress:
ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff,
&pdu->h323_message_body.progress);
break;
default:
pr_debug("nf_ct_q931: Q.931 signal %d\n",
pdu->h323_message_body.choice);
break;
}
if (ret < 0)
return -1;
if (pdu->options & eH323_UU_PDU_h245Control) {
for (i = 0; i < pdu->h245Control.count; i++) {
ret = process_h245(skb, ct, ctinfo,
protoff, data, dataoff,
&pdu->h245Control.item[i]);
if (ret < 0)
return -1;
}
}
return 0;
}
static int q931_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
static Q931 q931;
unsigned char *data = NULL;
int datalen;
int dataoff;
int ret;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
/* Process each TPKT */
while (get_tpkt_data(skb, protoff, ct, ctinfo,
&data, &datalen, &dataoff)) {
pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode Q.931 signal */
ret = DecodeQ931(data, datalen, &q931);
if (ret < 0) {
pr_debug("nf_ct_q931: decoding error: %s\n",
ret == H323_ERROR_BOUND ?
"out of bound" : "out of range");
/* We don't drop the packet on a decoding error */
break;
}
/* Process Q.931 signal */
if (process_q931(skb, ct, ctinfo, protoff,
&data, dataoff, &q931) < 0)
goto drop;
}
spin_unlock_bh(&nf_h323_lock);
return NF_ACCEPT;
drop:
spin_unlock_bh(&nf_h323_lock);
nf_ct_helper_log(skb, ct, "cannot process Q.931 message");
return NF_DROP;
}
static const struct nf_conntrack_expect_policy q931_exp_policy = {
/* T.120 and H.245 */
.max_expected = H323_RTP_CHANNEL_MAX * 4 + 4,
.timeout = 240,
};
static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
{
.name = "Q.931",
.me = THIS_MODULE,
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
.help = q931_help,
.expect_policy = &q931_exp_policy,
},
{
.name = "Q.931",
.me = THIS_MODULE,
.tuple.src.l3num = AF_INET6,
.tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
.help = q931_help,
.expect_policy = &q931_exp_policy,
},
};
static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
int *datalen)
{
const struct udphdr *uh;
struct udphdr _uh;
int dataoff;
uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh);
if (uh == NULL)
return NULL;
dataoff = protoff + sizeof(_uh);
if (dataoff >= skb->len)
return NULL;
*datalen = skb->len - dataoff;
if (*datalen > H323_MAX_SIZE)
*datalen = H323_MAX_SIZE;
return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
}
static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
union nf_inet_addr *addr,
__be16 port)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
memset(&tuple.src.u3, 0, sizeof(tuple.src.u3));
tuple.src.u.tcp.port = 0;
memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3));
tuple.dst.u.tcp.port = port;
tuple.dst.protonum = IPPROTO_TCP;
exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
if (exp && exp->master == ct)
return exp;
return NULL;
}
static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
int i;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
/* Look for the first related address */
for (i = 0; i < count; i++) {
if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
sizeof(addr)) == 0 && port != 0)
break;
}
if (i >= count) /* Not found */
return 0;
/* Create expect for Q.931 */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
gkrouted_only ? /* only accept calls from GK? */
&ct->tuplehash[!dir].tuple.src.u3 : NULL,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
exp->helper = nf_conntrack_helper_q931;
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) { /* Need NAT */
ret = nathook->nat_q931(skb, ct, ctinfo, protoff, data,
taddr, i, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
/* Save port for looking up expect in processing RCF */
info->sig_port[dir] = port;
} else
ret = -1;
}
nf_ct_expect_put(exp);
return ret;
}
static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, GatekeeperRequest *grq)
{
const struct nfct_h323_nat_hooks *nathook;
pr_debug("nf_ct_ras: GRQ\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) /* NATed */
return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
&grq->rasAddress, 1);
return 0;
}
static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, GatekeeperConfirm *gcf)
{
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
pr_debug("nf_ct_ras: GCF\n");
if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
return 0;
/* Registration port is the same as discovery port */
if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
port == ct->tuplehash[dir].tuple.src.u.udp.port)
return 0;
/* Avoid RAS expectation loops. A GCF is never expected. */
if (test_bit(IPS_EXPECTED_BIT, &ct->status))
return 0;
/* Need new expect */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_UDP, NULL, &port);
exp->helper = nf_conntrack_helper_ras;
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect RAS ");
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
nf_ct_expect_put(exp);
return ret;
}
static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, RegistrationRequest *rrq)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
const struct nfct_h323_nat_hooks *nathook;
int ret;
pr_debug("nf_ct_ras: RRQ\n");
ret = expect_q931(skb, ct, ctinfo, protoff, data,
rrq->callSignalAddress.item,
rrq->callSignalAddress.count);
if (ret < 0)
return -1;
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
rrq->rasAddress.item,
rrq->rasAddress.count);
if (ret < 0)
return -1;
}
if (rrq->options & eRegistrationRequest_timeToLive) {
pr_debug("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
info->timeout = rrq->timeToLive;
	} else {
		info->timeout = default_rrq_ttl;
	}
return 0;
}
static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, RegistrationConfirm *rcf)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
struct nf_conntrack_expect *exp;
pr_debug("nf_ct_ras: RCF\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
rcf->callSignalAddress.item,
rcf->callSignalAddress.count);
if (ret < 0)
return -1;
}
if (rcf->options & eRegistrationConfirm_timeToLive) {
pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
info->timeout = rcf->timeToLive;
}
if (info->timeout > 0) {
pr_debug("nf_ct_ras: set RAS connection timeout to "
"%u seconds\n", info->timeout);
nf_ct_refresh(ct, skb, info->timeout * HZ);
/* Set expect timeout */
spin_lock_bh(&nf_conntrack_expect_lock);
exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
info->sig_port[!dir]);
if (exp) {
pr_debug("nf_ct_ras: set Q.931 expect "
"timeout to %u seconds for",
info->timeout);
nf_ct_dump_tuple(&exp->tuple);
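			/* mod_timer_pending() only rearms a timer that is
			 * still pending, so an expectation that has already
			 * timed out is not brought back to life here.
			 */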
mod_timer_pending(&exp->timeout,
jiffies + info->timeout * HZ);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
return 0;
}
static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, UnregistrationRequest *urq)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
int ret;
pr_debug("nf_ct_ras: URQ\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
urq->callSignalAddress.item,
urq->callSignalAddress.count);
if (ret < 0)
return -1;
}
/* Clear old expect */
nf_ct_remove_expectations(ct);
info->sig_port[dir] = 0;
info->sig_port[!dir] = 0;
/* Give it 30 seconds for UCF or URJ */
nf_ct_refresh(ct, skb, 30 * HZ);
return 0;
}
static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, AdmissionRequest *arq)
{
const struct nf_ct_h323_master *info = nfct_help_data(ct);
const struct nfct_h323_nat_hooks *nathook;
int dir = CTINFO2DIR(ctinfo);
__be16 port;
union nf_inet_addr addr;
pr_debug("nf_ct_ras: ARQ\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (!nathook)
return 0;
if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->destCallSignalAddress,
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
port == info->sig_port[dir] &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* Answering ARQ */
return nathook->set_h225_addr(skb, protoff, data, 0,
&arq->destCallSignalAddress,
&ct->tuplehash[!dir].tuple.dst.u3,
info->sig_port[!dir]);
}
if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
/* Calling ARQ */
return nathook->set_h225_addr(skb, protoff, data, 0,
&arq->srcCallSignalAddress,
&ct->tuplehash[!dir].tuple.dst.u3,
port);
}
return 0;
}
static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, AdmissionConfirm *acf)
{
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
pr_debug("nf_ct_ras: ACF\n");
if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
&addr, &port))
return 0;
if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
const struct nfct_h323_nat_hooks *nathook;
/* Answering ACF */
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK)
return nathook->set_sig_addr(skb, ct, ctinfo, protoff,
data,
&acf->destCallSignalAddress, 1);
return 0;
}
/* Need new expect */
	exp = nf_ct_expect_alloc(ct);
	if (exp == NULL)
		return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT;
exp->helper = nf_conntrack_helper_q931;
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
	} else {
		ret = -1;
	}
nf_ct_expect_put(exp);
return ret;
}
static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, LocationRequest *lrq)
{
const struct nfct_h323_nat_hooks *nathook;
pr_debug("nf_ct_ras: LRQ\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK)
return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
&lrq->replyAddress, 1);
return 0;
}
static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, LocationConfirm *lcf)
{
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
pr_debug("nf_ct_ras: LCF\n");
if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
&addr, &port))
return 0;
/* Need new expect for call signal */
	exp = nf_ct_expect_alloc(ct);
	if (exp == NULL)
		return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT;
exp->helper = nf_conntrack_helper_q931;
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
	} else {
		ret = -1;
	}
nf_ct_expect_put(exp);
/* Ignore rasAddress */
return ret;
}
static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, InfoRequestResponse *irr)
{
const struct nfct_h323_nat_hooks *nathook;
int ret;
pr_debug("nf_ct_ras: IRR\n");
nathook = rcu_dereference(nfct_h323_nat_hook);
if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
ct->status & IPS_NAT_MASK) {
ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data,
&irr->rasAddress, 1);
if (ret < 0)
return -1;
ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data,
irr->callSignalAddress.item,
irr->callSignalAddress.count);
if (ret < 0)
return -1;
}
return 0;
}
static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, RasMessage *ras)
{
switch (ras->choice) {
case eRasMessage_gatekeeperRequest:
return process_grq(skb, ct, ctinfo, protoff, data,
&ras->gatekeeperRequest);
case eRasMessage_gatekeeperConfirm:
return process_gcf(skb, ct, ctinfo, protoff, data,
&ras->gatekeeperConfirm);
case eRasMessage_registrationRequest:
return process_rrq(skb, ct, ctinfo, protoff, data,
&ras->registrationRequest);
case eRasMessage_registrationConfirm:
return process_rcf(skb, ct, ctinfo, protoff, data,
&ras->registrationConfirm);
case eRasMessage_unregistrationRequest:
return process_urq(skb, ct, ctinfo, protoff, data,
&ras->unregistrationRequest);
case eRasMessage_admissionRequest:
return process_arq(skb, ct, ctinfo, protoff, data,
&ras->admissionRequest);
case eRasMessage_admissionConfirm:
return process_acf(skb, ct, ctinfo, protoff, data,
&ras->admissionConfirm);
case eRasMessage_locationRequest:
return process_lrq(skb, ct, ctinfo, protoff, data,
&ras->locationRequest);
case eRasMessage_locationConfirm:
return process_lcf(skb, ct, ctinfo, protoff, data,
&ras->locationConfirm);
case eRasMessage_infoRequestResponse:
return process_irr(skb, ct, ctinfo, protoff, data,
&ras->infoRequestResponse);
default:
pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
break;
}
return 0;
}
static int ras_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
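	/* The decoded RAS message is too large for the stack, so one
	 * static buffer is shared by all packets; nf_h323_lock (taken
	 * below) serializes access to it.
	 */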
static RasMessage ras;
unsigned char *data;
int datalen = 0;
int ret;
pr_debug("nf_ct_ras: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
/* Get UDP data */
data = get_udp_data(skb, protoff, &datalen);
if (data == NULL)
goto accept;
pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode RAS message */
ret = DecodeRasMessage(data, datalen, &ras);
if (ret < 0) {
pr_debug("nf_ct_ras: decoding error: %s\n",
ret == H323_ERROR_BOUND ?
"out of bound" : "out of range");
goto accept;
}
/* Process RAS message */
if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0)
goto drop;
accept:
spin_unlock_bh(&nf_h323_lock);
return NF_ACCEPT;
drop:
spin_unlock_bh(&nf_h323_lock);
nf_ct_helper_log(skb, ct, "cannot process RAS message");
return NF_DROP;
}
static const struct nf_conntrack_expect_policy ras_exp_policy = {
.max_expected = 32,
.timeout = 240,
};
static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
{
.name = "RAS",
.me = THIS_MODULE,
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.help = ras_help,
.expect_policy = &ras_exp_policy,
},
{
.name = "RAS",
.me = THIS_MODULE,
.tuple.src.l3num = AF_INET6,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.help = ras_help,
.expect_policy = &ras_exp_policy,
},
};
static int __init h323_helper_init(void)
{
int ret;
ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
if (ret < 0)
return ret;
ret = nf_conntrack_helpers_register(nf_conntrack_helper_q931,
ARRAY_SIZE(nf_conntrack_helper_q931));
if (ret < 0)
goto err1;
ret = nf_conntrack_helpers_register(nf_conntrack_helper_ras,
ARRAY_SIZE(nf_conntrack_helper_ras));
if (ret < 0)
goto err2;
return 0;
err2:
nf_conntrack_helpers_unregister(nf_conntrack_helper_q931,
ARRAY_SIZE(nf_conntrack_helper_q931));
err1:
nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
return ret;
}
static void __exit h323_helper_exit(void)
{
nf_conntrack_helpers_unregister(nf_conntrack_helper_ras,
ARRAY_SIZE(nf_conntrack_helper_ras));
nf_conntrack_helpers_unregister(nf_conntrack_helper_q931,
ARRAY_SIZE(nf_conntrack_helper_q931));
nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
}
static void __exit nf_conntrack_h323_fini(void)
{
h323_helper_exit();
kfree(h323_buffer);
pr_debug("nf_ct_h323: fini\n");
}
static int __init nf_conntrack_h323_init(void)
{
int ret;
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_h323_master));
h323_buffer = kmalloc(H323_MAX_SIZE + 1, GFP_KERNEL);
if (!h323_buffer)
return -ENOMEM;
ret = h323_helper_init();
if (ret < 0)
goto err1;
pr_debug("nf_ct_h323: init success\n");
return 0;
err1:
kfree(h323_buffer);
return ret;
}
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);
MODULE_AUTHOR("Jing Min Zhao <[email protected]>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
MODULE_ALIAS_NFCT_HELPER("RAS");
MODULE_ALIAS_NFCT_HELPER("Q.931");
MODULE_ALIAS_NFCT_HELPER("H.245");
| linux-master | net/netfilter/nf_conntrack_h323_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_HMARK - Netfilter module to set mark by means of hashing
*
* (C) 2012 by Hans Schillstrom <[email protected]>
* (C) 2012 by Pablo Neira Ayuso <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_HMARK.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <net/ipv6.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hans Schillstrom <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
MODULE_ALIAS("ipt_HMARK");
MODULE_ALIAS("ip6t_HMARK");
struct hmark_tuple {
__be32 src;
__be32 dst;
union hmark_ports uports;
u8 proto;
};
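/* Fold a masked IPv6 address into 32 bits of hash input by XORing its
 * four 32-bit words together.
 */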
static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
{
return (addr32[0] & mask[0]) ^
(addr32[1] & mask[1]) ^
(addr32[2] & mask[2]) ^
(addr32[3] & mask[3]);
}
static inline __be32
hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
{
switch (l3num) {
case AF_INET:
return *addr32 & *mask;
case AF_INET6:
return hmark_addr6_mask(addr32, mask);
}
return 0;
}
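/* Apply the configured port mask/set, then store the pair in a canonical
 * order (larger value in the upper 16 bits) so that both directions of a
 * flow produce the same hash input.
 */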
static inline void hmark_swap_ports(union hmark_ports *uports,
const struct xt_hmark_info *info)
{
union hmark_ports hp;
u16 src, dst;
hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
src = ntohs(hp.b16.src);
dst = ntohs(hp.b16.dst);
if (dst > src)
uports->v32 = (dst << 16) | src;
else
uports->v32 = (src << 16) | dst;
}
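/* Build the hash tuple from the conntrack entry instead of the packet:
 * the original-direction source and the reply-direction source describe
 * the flow consistently even when NAT has rewritten the headers.
 */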
static int
hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
const struct xt_hmark_info *info)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_conntrack_tuple *otuple;
struct nf_conntrack_tuple *rtuple;
if (ct == NULL)
return -1;
otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
info->src_mask.ip6);
t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
info->dst_mask.ip6);
if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
return 0;
t->proto = nf_ct_protonum(ct);
if (t->proto != IPPROTO_ICMP) {
t->uports.b16.src = otuple->src.u.all;
t->uports.b16.dst = rtuple->src.u.all;
hmark_swap_ports(&t->uports, info);
}
return 0;
#else
return -1;
#endif
}
/* This hash function is endian independent, to ensure consistent hashing if
* the cluster is composed of big and little endian systems. */
static inline u32
hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
{
u32 hash;
u32 src = ntohl(t->src);
u32 dst = ntohl(t->dst);
if (dst < src)
swap(src, dst);
hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
hash = hash ^ (t->proto & info->proto_mask);
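	/* reciprocal_scale() maps the hash into [0, hmodulus); adding
	 * hoffset yields marks in [hoffset, hoffset + hmodulus - 1].
	 */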
return reciprocal_scale(hash, info->hmodulus) + info->hoffset;
}
static void
hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
struct hmark_tuple *t, const struct xt_hmark_info *info)
{
int protoff;
protoff = proto_ports_offset(t->proto);
if (protoff < 0)
return;
nhoff += protoff;
if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
return;
hmark_swap_ports(&t->uports, info);
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
{
struct icmp6hdr *icmp6h, _ih6;
icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
if (icmp6h == NULL)
return 0;
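	/* ICMPv6 types 1..127 are error messages, which carry the
	 * offending packet's inner IPv6 header right after the ICMPv6
	 * header.
	 */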
if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
*offset += sizeof(struct icmp6hdr);
return 1;
}
return 0;
}
static int
hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
const struct xt_hmark_info *info)
{
struct ipv6hdr *ip6, _ip6;
int flag = IP6_FH_F_AUTH;
unsigned int nhoff = 0;
u16 fragoff = 0;
int nexthdr;
ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
if (nexthdr < 0)
return 0;
/* No need to check for icmp errors on fragments */
if ((flag & IP6_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
goto noicmp;
/* Use inner header in case of ICMP errors */
if (get_inner6_hdr(skb, &nhoff)) {
ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
if (ip6 == NULL)
return -1;
/* If AH present, use SPI like in ESP. */
flag = IP6_FH_F_AUTH;
nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
if (nexthdr < 0)
return -1;
}
noicmp:
t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
return 0;
t->proto = nexthdr;
if (t->proto == IPPROTO_ICMPV6)
return 0;
if (flag & IP6_FH_F_FRAG)
return 0;
hmark_set_tuple_ports(skb, nhoff, t, info);
return 0;
}
static unsigned int
hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_hmark_info *info = par->targinfo;
struct hmark_tuple t;
memset(&t, 0, sizeof(struct hmark_tuple));
if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
if (hmark_ct_set_htuple(skb, &t, info) < 0)
return XT_CONTINUE;
} else {
if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
return XT_CONTINUE;
}
skb->mark = hmark_hash(&t, info);
return XT_CONTINUE;
}
#endif
static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
{
const struct icmphdr *icmph;
struct icmphdr _ih;
/* Not enough header? */
icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
return 0;
/* Error message? */
if (!icmp_is_err(icmph->type))
return 0;
*nhoff += iphsz + sizeof(_ih);
return 1;
}
static int
hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
const struct xt_hmark_info *info)
{
struct iphdr *ip, _ip;
int nhoff = skb_network_offset(skb);
ip = (struct iphdr *) (skb->data + nhoff);
if (ip->protocol == IPPROTO_ICMP) {
/* Use inner header in case of ICMP errors */
if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
if (ip == NULL)
return -1;
}
}
t->src = ip->saddr & info->src_mask.ip;
t->dst = ip->daddr & info->dst_mask.ip;
if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
return 0;
t->proto = ip->protocol;
/* ICMP has no ports, skip */
if (t->proto == IPPROTO_ICMP)
return 0;
/* follow-up fragments don't contain ports, skip all fragments */
if (ip_is_fragment(ip))
return 0;
hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
return 0;
}
static unsigned int
hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_hmark_info *info = par->targinfo;
struct hmark_tuple t;
memset(&t, 0, sizeof(struct hmark_tuple));
if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
if (hmark_ct_set_htuple(skb, &t, info) < 0)
return XT_CONTINUE;
} else {
if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
return XT_CONTINUE;
}
skb->mark = hmark_hash(&t, info);
return XT_CONTINUE;
}
static int hmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_hmark_info *info = par->targinfo;
const char *errmsg = "proto mask must be zero with L3 mode";
if (!info->hmodulus)
return -EINVAL;
if (info->proto_mask &&
(info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
goto err;
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
return -EINVAL;
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
errmsg = "spi-set and port-set can't be combined";
goto err;
}
return 0;
err:
pr_info_ratelimited("%s\n", errmsg);
return -EINVAL;
}
static struct xt_target hmark_tg_reg[] __read_mostly = {
{
.name = "HMARK",
.family = NFPROTO_IPV4,
.target = hmark_tg_v4,
.targetsize = sizeof(struct xt_hmark_info),
.checkentry = hmark_tg_check,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "HMARK",
.family = NFPROTO_IPV6,
.target = hmark_tg_v6,
.targetsize = sizeof(struct xt_hmark_info),
.checkentry = hmark_tg_check,
.me = THIS_MODULE,
},
#endif
};
static int __init hmark_tg_init(void)
{
return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
}
static void __exit hmark_tg_exit(void)
{
xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
}
module_init(hmark_tg_init);
module_exit(hmark_tg_exit);
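/* Illustrative userspace usage (a sketch only; the option names come
 * from the libxt_HMARK iptables extension and the exact syntax may vary
 * by iptables version):
 *
 *   iptables -t mangle -A PREROUTING -j HMARK \
 *	--hmark-tuple src,dst,sport,dport \
 *	--hmark-mod 4 --hmark-offset 0x100 --hmark-rnd 0xfeedcafe
 *
 * This distributes flows over the four marks 0x100..0x103, which can
 * then drive e.g. fwmark-based multipath routing.
 */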
| linux-master | net/netfilter/xt_HMARK.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match TCP MSS values. */
/* Copyright (C) 2000 Marc Boucher <[email protected]>
* Portions (C) 2005 by Harald Welte <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <linux/netfilter/xt_tcpmss.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <[email protected]>");
MODULE_DESCRIPTION("Xtables: TCP MSS match");
MODULE_ALIAS("ipt_tcpmss");
MODULE_ALIAS("ip6t_tcpmss");
static bool
tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_tcpmss_match_info *info = par->matchinfo;
const struct tcphdr *th;
struct tcphdr _tcph;
/* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
const u_int8_t *op;
u8 _opt[15 * 4 - sizeof(_tcph)];
unsigned int i, optlen;
/* If we don't have the whole header, drop packet. */
th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
if (th == NULL)
goto dropit;
/* Malformed. */
if (th->doff*4 < sizeof(*th))
goto dropit;
optlen = th->doff*4 - sizeof(*th);
if (!optlen)
goto out;
/* Truncated options. */
op = skb_header_pointer(skb, par->thoff + sizeof(*th), optlen, _opt);
if (op == NULL)
goto dropit;
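	/* Walk the TCP options: kinds 0 (EOL) and 1 (NOP) are single
	 * bytes; every other option carries its length in op[i+1].  The
	 * "? : 1" below guards against a bogus zero length causing an
	 * endless loop.
	 */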
for (i = 0; i < optlen; ) {
if (op[i] == TCPOPT_MSS
&& (optlen - i) >= TCPOLEN_MSS
&& op[i+1] == TCPOLEN_MSS) {
u_int16_t mssval;
mssval = (op[i+2] << 8) | op[i+3];
return (mssval >= info->mss_min &&
mssval <= info->mss_max) ^ info->invert;
}
if (op[i] < 2)
i++;
else
i += op[i+1] ? : 1;
}
out:
return info->invert;
dropit:
par->hotdrop = true;
return false;
}
static struct xt_match tcpmss_mt_reg[] __read_mostly = {
{
.name = "tcpmss",
.family = NFPROTO_IPV4,
.match = tcpmss_mt,
.matchsize = sizeof(struct xt_tcpmss_match_info),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
{
.name = "tcpmss",
.family = NFPROTO_IPV6,
.match = tcpmss_mt,
.matchsize = sizeof(struct xt_tcpmss_match_info),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
};
static int __init tcpmss_mt_init(void)
{
return xt_register_matches(tcpmss_mt_reg, ARRAY_SIZE(tcpmss_mt_reg));
}
static void __exit tcpmss_mt_exit(void)
{
xt_unregister_matches(tcpmss_mt_reg, ARRAY_SIZE(tcpmss_mt_reg));
}
module_init(tcpmss_mt_init);
module_exit(tcpmss_mt_exit);
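/* Illustrative userspace usage (a sketch based on the iptables tcpmss
 * match; --mss takes a single value or an inclusive value:value range):
 *
 *   iptables -A INPUT -p tcp --tcp-flags SYN,RST SYN \
 *	-m tcpmss --mss 1:500 -j DROP
 *
 * which drops SYN packets advertising an implausibly small MSS.
 */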
| linux-master | net/netfilter/xt_tcpmss.c |