Sven Eckelmann0046b042016-01-01 00:01:03 +01001/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/fs.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
Sven Eckelmann90f564d2016-01-16 10:29:40 +010026#include <linux/kref.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020027#include <linux/list.h>
28#include <linux/lockdep.h>
29#include <linux/netdevice.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080030#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020031#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/workqueue.h>
35
36#include "distributed-arp-table.h"
37#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000038#include "gateway_client.h"
39#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020040#include "hash.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010041#include "multicast.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020042#include "network-coding.h"
43#include "routing.h"
44#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000045
Antonio Quartullidec05072012-11-10 11:00:32 +010046/* hash class keys */
47static struct lock_class_key batadv_orig_hash_lock_class_key;
48
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020049static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000050
Sven Eckelmann62fe7102015-09-15 19:00:48 +020051/**
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +010052 * batadv_compare_orig - comparing function used in the originator hash table
53 * @node: node in the local table
54 * @data2: second object to compare the node to
Sven Eckelmann62fe7102015-09-15 19:00:48 +020055 *
56 * Return: 1 if they are the same originator
57 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020058int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020059{
Sven Eckelmann56303d32012-06-05 22:31:31 +020060 const void *data1 = container_of(node, struct batadv_orig_node,
61 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020062
dingtianhong323813e2013-12-26 19:40:39 +080063 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020064}
65
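/* Illustrative sketch (not part of the original file): batadv_compare_orig()
 * is meant to be passed to the generic hash helpers from hash.h together with
 * a bucket-selection function (batadv_choose_orig() from originator.h is
 * assumed here). A hypothetical lookup of an originator by MAC address could
 * look roughly like this; the refcount handling mirrors the loops used later
 * in this file.
 */
static struct batadv_orig_node *
example_orig_hash_find(struct batadv_priv *bat_priv, const u8 *addr)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct batadv_orig_node *orig_node, *res = NULL;
	struct hlist_head *head;
	u32 index;

	if (!hash)
		return NULL;

	/* pick the bucket for this address (assumed helper) */
	index = batadv_choose_orig(addr, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
		if (!batadv_compare_eth(orig_node->orig, addr))
			continue;

		if (!atomic_inc_not_zero(&orig_node->refcount))
			continue;

		res = orig_node;
		break;
	}
	rcu_read_unlock();

	return res;
}
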
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020066/**
67 * batadv_orig_node_vlan_get - get an orig_node_vlan object
68 * @orig_node: the originator serving the VLAN
69 * @vid: the VLAN identifier
70 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +020071 * Return: the vlan object identified by vid and belonging to orig_node or NULL
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020072 * if it does not exist.
73 */
74struct batadv_orig_node_vlan *
75batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
76 unsigned short vid)
77{
78 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
79
80 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080081 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020082 if (tmp->vid != vid)
83 continue;
84
85 if (!atomic_inc_not_zero(&tmp->refcount))
86 continue;
87
88 vlan = tmp;
89
90 break;
91 }
92 rcu_read_unlock();
93
94 return vlan;
95}
96
97/**
98 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
99 * object
100 * @orig_node: the originator serving the VLAN
101 * @vid: the VLAN identifier
102 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200103 * Return: NULL in case of failure or the vlan object identified by vid and
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200104 * belonging to orig_node otherwise. The object is created and added to the list
105 * if it does not exist.
106 *
107 * The object is returned with refcounter increased by 1.
108 */
109struct batadv_orig_node_vlan *
110batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
111 unsigned short vid)
112{
113 struct batadv_orig_node_vlan *vlan;
114
115 spin_lock_bh(&orig_node->vlan_list_lock);
116
117 /* first look if an object for this vid already exists */
118 vlan = batadv_orig_node_vlan_get(orig_node, vid);
119 if (vlan)
120 goto out;
121
122 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
123 if (!vlan)
124 goto out;
125
126 atomic_set(&vlan->refcount, 2);
127 vlan->vid = vid;
128
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800129 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200130
131out:
132 spin_unlock_bh(&orig_node->vlan_list_lock);
133
134 return vlan;
135}
136
137/**
138 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
139 * the originator-vlan object
140 * @orig_vlan: the originator-vlan object to release
141 */
142void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
143{
144 if (atomic_dec_and_test(&orig_vlan->refcount))
145 kfree_rcu(orig_vlan, rcu);
146}
147
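/* Illustrative sketch (not part of the original file): both
 * batadv_orig_node_vlan_get() and batadv_orig_node_vlan_new() return the
 * vlan object with its refcount increased by one, so every successful call
 * has to be paired with batadv_orig_node_vlan_free_ref(). A hypothetical
 * caller therefore follows a get/use/put pattern:
 */
static void example_touch_orig_vlan(struct batadv_orig_node *orig_node,
				    unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	/* take a reference, creating the object if it does not exist yet */
	vlan = batadv_orig_node_vlan_new(orig_node, vid);
	if (!vlan)
		return;

	/* ... use the vlan object here ... */

	/* drop the caller's reference again */
	batadv_orig_node_vlan_free_ref(vlan);
}
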
Sven Eckelmann56303d32012-06-05 22:31:31 +0200148int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000149{
150 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200151 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000152
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200153 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000154
155 if (!bat_priv->orig_hash)
156 goto err;
157
Antonio Quartullidec05072012-11-10 11:00:32 +0100158 batadv_hash_set_lock_class(bat_priv->orig_hash,
159 &batadv_orig_hash_lock_class_key);
160
Antonio Quartulli72414442012-12-25 13:14:37 +0100161 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
162 queue_delayed_work(batadv_event_workqueue,
163 &bat_priv->orig_work,
164 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
165
Sven Eckelmann5346c352012-05-05 13:27:28 +0200166 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000167
168err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200169 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000170}
171
Simon Wunderlich89652332013-11-13 19:14:46 +0100172/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100173 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
174 * free after rcu grace period
Sven Eckelmann962c6832016-01-16 10:29:51 +0100175 * @ref: kref pointer of the neigh_ifinfo
Simon Wunderlich89652332013-11-13 19:14:46 +0100176 */
Sven Eckelmann962c6832016-01-16 10:29:51 +0100177static void batadv_neigh_ifinfo_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100178{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100179 struct batadv_neigh_ifinfo *neigh_ifinfo;
180
181 neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
182
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100183 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
184 batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
185
186 kfree_rcu(neigh_ifinfo, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100187}
188
189/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100190 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich89652332013-11-13 19:14:46 +0100191 * the neigh_ifinfo
192 * @neigh_ifinfo: the neigh_ifinfo object to release
193 */
194void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
195{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100196 kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
Simon Wunderlich89652332013-11-13 19:14:46 +0100197}
198
199/**
Sven Eckelmannf6389692016-01-05 12:06:23 +0100200 * batadv_hardif_neigh_release - release hardif neigh node from lists and
201 * queue for free after rcu grace period
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100202 * @ref: kref pointer of the neigh_node
Marek Lindnercef63412015-08-04 21:09:55 +0800203 */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100204static void batadv_hardif_neigh_release(struct kref *ref)
Marek Lindnercef63412015-08-04 21:09:55 +0800205{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100206 struct batadv_hardif_neigh_node *hardif_neigh;
207
208 hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
209 refcount);
210
Sven Eckelmannf6389692016-01-05 12:06:23 +0100211 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
212 hlist_del_init_rcu(&hardif_neigh->list);
213 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
Sven Eckelmannbab7c6c2016-01-05 12:06:17 +0100214
Sven Eckelmannf6389692016-01-05 12:06:23 +0100215 batadv_hardif_free_ref(hardif_neigh->if_incoming);
216 kfree_rcu(hardif_neigh, rcu);
Marek Lindnercef63412015-08-04 21:09:55 +0800217}
218
219/**
 220 * batadv_hardif_neigh_free_ref - decrement the hardif neighbor's refcounter
Sven Eckelmannf6389692016-01-05 12:06:23 +0100221 * and possibly release it
Marek Lindnercef63412015-08-04 21:09:55 +0800222 * @hardif_neigh: hardif neighbor to free
223 */
224void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
225{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100226 kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
Marek Lindnercef63412015-08-04 21:09:55 +0800227}
228
229/**
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100230 * batadv_neigh_node_release - release neigh_node from lists and queue for
231 * free after rcu grace period
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100232 * @ref: kref pointer of the neigh_node
Simon Wunderlich89652332013-11-13 19:14:46 +0100233 */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100234static void batadv_neigh_node_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100235{
236 struct hlist_node *node_tmp;
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100237 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800238 struct batadv_hardif_neigh_node *hardif_neigh;
Simon Wunderlich89652332013-11-13 19:14:46 +0100239 struct batadv_neigh_ifinfo *neigh_ifinfo;
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800240 struct batadv_algo_ops *bao;
Simon Wunderlich89652332013-11-13 19:14:46 +0100241
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100242 neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800243 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +0100244
245 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
246 &neigh_node->ifinfo_list, list) {
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100247 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100248 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800249
Marek Lindnercef63412015-08-04 21:09:55 +0800250 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
251 neigh_node->addr);
252 if (hardif_neigh) {
253 /* batadv_hardif_neigh_get() increases refcount too */
Sven Eckelmannf6389692016-01-05 12:06:23 +0100254 batadv_hardif_neigh_free_ref(hardif_neigh);
255 batadv_hardif_neigh_free_ref(hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800256 }
257
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800258 if (bao->bat_neigh_free)
259 bao->bat_neigh_free(neigh_node);
260
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100261 batadv_hardif_free_ref(neigh_node->if_incoming);
Simon Wunderlich89652332013-11-13 19:14:46 +0100262
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100263 kfree_rcu(neigh_node, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100264}
265
266/**
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100267 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter and possibly
 268 * release it
Simon Wunderlich89652332013-11-13 19:14:46 +0100269 * @neigh_node: the neighbor to free
270 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200271void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000272{
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100273 kref_put(&neigh_node->refcount, batadv_neigh_node_release);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000274}
275
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100276/**
277 * batadv_orig_node_get_router - router to the originator depending on iface
278 * @orig_node: the orig node for the router
279 * @if_outgoing: the interface where the payload packet has been received or
280 * the OGM should be sent to
281 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200282 * Return: the neighbor which should be router for this orig_node/iface.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100283 *
284 * The object is returned with refcounter increased by 1.
285 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200286struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100287batadv_orig_router_get(struct batadv_orig_node *orig_node,
288 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000289{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100290 struct batadv_orig_ifinfo *orig_ifinfo;
291 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000292
293 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100294 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
295 if (orig_ifinfo->if_outgoing != if_outgoing)
296 continue;
297
298 router = rcu_dereference(orig_ifinfo->router);
299 break;
300 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000301
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100302 if (router && !kref_get_unless_zero(&router->refcount))
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000303 router = NULL;
304
305 rcu_read_unlock();
306 return router;
307}
308
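/* Illustrative sketch (not part of the original file): the router returned by
 * batadv_orig_router_get() carries an extra reference, so callers (typically
 * the routing path selecting a next hop) must drop it again with
 * batadv_neigh_node_free_ref() once they are done:
 */
static bool example_orig_has_router(struct batadv_orig_node *orig_node,
				    const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *router;

	router = batadv_orig_router_get(orig_node, if_outgoing);
	if (!router)
		return false;

	/* ... forward the packet via router->addr / router->if_incoming ... */

	batadv_neigh_node_free_ref(router);
	return true;
}
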
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200309/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100310 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
311 * @orig_node: the orig node to be queried
312 * @if_outgoing: the interface for which the ifinfo should be acquired
313 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200314 * Return: the requested orig_ifinfo or NULL if not found.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100315 *
316 * The object is returned with refcounter increased by 1.
317 */
318struct batadv_orig_ifinfo *
319batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
320 struct batadv_hard_iface *if_outgoing)
321{
322 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
323
324 rcu_read_lock();
325 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
326 list) {
327 if (tmp->if_outgoing != if_outgoing)
328 continue;
329
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100330 if (!kref_get_unless_zero(&tmp->refcount))
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100331 continue;
332
333 orig_ifinfo = tmp;
334 break;
335 }
336 rcu_read_unlock();
337
338 return orig_ifinfo;
339}
340
341/**
342 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
343 * @orig_node: the orig node to be queried
344 * @if_outgoing: the interface for which the ifinfo should be acquired
345 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200346 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100347 * interface otherwise. The object is created and added to the list
348 * if it does not exist.
349 *
350 * The object is returned with refcounter increased by 1.
351 */
352struct batadv_orig_ifinfo *
353batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
354 struct batadv_hard_iface *if_outgoing)
355{
356 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
357 unsigned long reset_time;
358
359 spin_lock_bh(&orig_node->neigh_list_lock);
360
361 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
362 if (orig_ifinfo)
363 goto out;
364
365 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
366 if (!orig_ifinfo)
367 goto out;
368
369 if (if_outgoing != BATADV_IF_DEFAULT &&
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100370 !kref_get_unless_zero(&if_outgoing->refcount)) {
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100371 kfree(orig_ifinfo);
372 orig_ifinfo = NULL;
373 goto out;
374 }
375
376 reset_time = jiffies - 1;
377 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
378 orig_ifinfo->batman_seqno_reset = reset_time;
379 orig_ifinfo->if_outgoing = if_outgoing;
380 INIT_HLIST_NODE(&orig_ifinfo->list);
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100381 kref_init(&orig_ifinfo->refcount);
382 kref_get(&orig_ifinfo->refcount);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100383 hlist_add_head_rcu(&orig_ifinfo->list,
384 &orig_node->ifinfo_list);
385out:
386 spin_unlock_bh(&orig_node->neigh_list_lock);
387 return orig_ifinfo;
388}
389
390/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100391 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200392 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100393 * @if_outgoing: the interface for which the ifinfo should be acquired
394 *
395 * The object is returned with refcounter increased by 1.
396 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200397 * Return: the requested neigh_ifinfo or NULL if not found
Simon Wunderlich89652332013-11-13 19:14:46 +0100398 */
399struct batadv_neigh_ifinfo *
400batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
401 struct batadv_hard_iface *if_outgoing)
402{
403 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
404 *tmp_neigh_ifinfo;
405
406 rcu_read_lock();
407 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
408 list) {
409 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
410 continue;
411
Sven Eckelmann962c6832016-01-16 10:29:51 +0100412 if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100413 continue;
414
415 neigh_ifinfo = tmp_neigh_ifinfo;
416 break;
417 }
418 rcu_read_unlock();
419
420 return neigh_ifinfo;
421}
422
423/**
 424 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200425 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100426 * @if_outgoing: the interface for which the ifinfo should be acquired
427 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200428 * Return: NULL in case of failure or the neigh_ifinfo object for the
Simon Wunderlich89652332013-11-13 19:14:46 +0100429 * if_outgoing interface otherwise. The object is created and added to the list
430 * if it does not exist.
431 *
432 * The object is returned with refcounter increased by 1.
433 */
434struct batadv_neigh_ifinfo *
435batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
436 struct batadv_hard_iface *if_outgoing)
437{
438 struct batadv_neigh_ifinfo *neigh_ifinfo;
439
440 spin_lock_bh(&neigh->ifinfo_lock);
441
442 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
443 if (neigh_ifinfo)
444 goto out;
445
446 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
447 if (!neigh_ifinfo)
448 goto out;
449
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100450 if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
Simon Wunderlich89652332013-11-13 19:14:46 +0100451 kfree(neigh_ifinfo);
452 neigh_ifinfo = NULL;
453 goto out;
454 }
455
456 INIT_HLIST_NODE(&neigh_ifinfo->list);
Sven Eckelmann962c6832016-01-16 10:29:51 +0100457 kref_init(&neigh_ifinfo->refcount);
458 kref_get(&neigh_ifinfo->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100459 neigh_ifinfo->if_outgoing = if_outgoing;
460
461 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
462
463out:
464 spin_unlock_bh(&neigh->ifinfo_lock);
465
466 return neigh_ifinfo;
467}
468
469/**
Marek Lindnered292662015-08-04 23:31:44 +0800470 * batadv_neigh_node_get - retrieve a neighbour from the list
471 * @orig_node: originator which the neighbour belongs to
 472 * @hard_iface: the interface this neighbour is connected to
473 * @addr: the address of the neighbour
474 *
475 * Looks for and possibly returns a neighbour belonging to this originator list
476 * which is connected through the provided hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200477 *
 478 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800479 */
480static struct batadv_neigh_node *
481batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
482 const struct batadv_hard_iface *hard_iface,
483 const u8 *addr)
484{
485 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
486
487 rcu_read_lock();
488 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
489 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
490 continue;
491
492 if (tmp_neigh_node->if_incoming != hard_iface)
493 continue;
494
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100495 if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
Marek Lindnered292662015-08-04 23:31:44 +0800496 continue;
497
498 res = tmp_neigh_node;
499 break;
500 }
501 rcu_read_unlock();
502
503 return res;
504}
505
506/**
Marek Lindnercef63412015-08-04 21:09:55 +0800507 * batadv_hardif_neigh_create - create a hardif neighbour node
508 * @hard_iface: the interface this neighbour is connected to
509 * @neigh_addr: the interface address of the neighbour to retrieve
510 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200511 * Return: the hardif neighbour node if found or created, NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800512 */
513static struct batadv_hardif_neigh_node *
514batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
515 const u8 *neigh_addr)
516{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800517 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800518 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
519
520 spin_lock_bh(&hard_iface->neigh_list_lock);
521
522 /* check if neighbor hasn't been added in the meantime */
523 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
524 if (hardif_neigh)
525 goto out;
526
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100527 if (!kref_get_unless_zero(&hard_iface->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800528 goto out;
529
530 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
531 if (!hardif_neigh) {
532 batadv_hardif_free_ref(hard_iface);
533 goto out;
534 }
535
536 INIT_HLIST_NODE(&hardif_neigh->list);
537 ether_addr_copy(hardif_neigh->addr, neigh_addr);
538 hardif_neigh->if_incoming = hard_iface;
539 hardif_neigh->last_seen = jiffies;
540
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100541 kref_init(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800542
Marek Lindner8248a4c2015-08-04 21:09:56 +0800543 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
544 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
545
Marek Lindnercef63412015-08-04 21:09:55 +0800546 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
547
548out:
549 spin_unlock_bh(&hard_iface->neigh_list_lock);
550 return hardif_neigh;
551}
552
553/**
554 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
555 * node
556 * @hard_iface: the interface this neighbour is connected to
557 * @neigh_addr: the interface address of the neighbour to retrieve
558 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200559 * Return: the hardif neighbour node if found or created, NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800560 */
561static struct batadv_hardif_neigh_node *
562batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
563 const u8 *neigh_addr)
564{
565 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
566
567 /* first check without locking to avoid the overhead */
568 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
569 if (hardif_neigh)
570 return hardif_neigh;
571
572 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
573}
574
575/**
576 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
 577 * @hard_iface: the interface this neighbour is connected to
578 * @neigh_addr: the address of the neighbour
579 *
580 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200581 *
 582 * Return: neighbor when found. Otherwise NULL
Marek Lindnercef63412015-08-04 21:09:55 +0800583 */
584struct batadv_hardif_neigh_node *
585batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
586 const u8 *neigh_addr)
587{
588 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
589
590 rcu_read_lock();
591 hlist_for_each_entry_rcu(tmp_hardif_neigh,
592 &hard_iface->neigh_list, list) {
593 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
594 continue;
595
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100596 if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800597 continue;
598
599 hardif_neigh = tmp_hardif_neigh;
600 break;
601 }
602 rcu_read_unlock();
603
604 return hardif_neigh;
605}
606
607/**
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200608 * batadv_neigh_node_new - create and init a new neigh_node object
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800609 * @orig_node: originator object representing the neighbour
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200610 * @hard_iface: the interface where the neighbour is connected to
611 * @neigh_addr: the mac address of the neighbour interface
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200612 *
613 * Allocates a new neigh_node object and initialises all the generic fields.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200614 *
 615 * Return: the neighbor node if found or newly created, NULL otherwise
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200616 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200617struct batadv_neigh_node *
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800618batadv_neigh_node_new(struct batadv_orig_node *orig_node,
619 struct batadv_hard_iface *hard_iface,
620 const u8 *neigh_addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000621{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200622 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800623 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000624
Marek Lindner741aa062015-07-26 04:57:43 +0800625 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
626 if (neigh_node)
627 goto out;
628
Marek Lindnercef63412015-08-04 21:09:55 +0800629 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
630 neigh_addr);
631 if (!hardif_neigh)
632 goto out;
633
Sven Eckelmann704509b2011-05-14 23:14:54 +0200634 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000635 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800636 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000637
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100638 if (!kref_get_unless_zero(&hard_iface->refcount)) {
Marek Lindnerf729dc702015-07-26 04:37:15 +0800639 kfree(neigh_node);
640 neigh_node = NULL;
641 goto out;
642 }
643
Marek Lindner9591a792010-12-12 21:57:11 +0000644 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100645 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
646 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000647
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100648 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200649 neigh_node->if_incoming = hard_iface;
650 neigh_node->orig_node = orig_node;
651
Marek Lindner1605d0d2011-02-18 12:28:11 +0000652 /* extra reference for return */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100653 kref_init(&neigh_node->refcount);
654 kref_get(&neigh_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000655
Marek Lindner741aa062015-07-26 04:57:43 +0800656 spin_lock_bh(&orig_node->neigh_list_lock);
657 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
658 spin_unlock_bh(&orig_node->neigh_list_lock);
659
Marek Lindnercef63412015-08-04 21:09:55 +0800660 /* increment unique neighbor refcount */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100661 kref_get(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800662
Marek Lindner741aa062015-07-26 04:57:43 +0800663 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
664 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
665 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
666
Marek Lindner7ae8b282012-03-01 15:35:21 +0800667out:
Marek Lindnercef63412015-08-04 21:09:55 +0800668 if (hardif_neigh)
669 batadv_hardif_neigh_free_ref(hardif_neigh);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000670 return neigh_node;
671}
672
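/* Illustrative sketch (not part of the original file): a routing algorithm
 * (for example the OGM receive path) would typically look up or create the
 * neighbor a packet was received from and drop the extra reference returned
 * by batadv_neigh_node_new() once it is done. All names below are
 * hypothetical.
 */
static void example_update_neighbor(struct batadv_orig_node *orig_node,
				    struct batadv_hard_iface *recv_if,
				    const u8 *ethhdr_source)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(orig_node, recv_if, ethhdr_source);
	if (!neigh_node)
		return;

	/* refresh the timestamp while holding the reference */
	neigh_node->last_seen = jiffies;

	batadv_neigh_node_free_ref(neigh_node);
}
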
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100673/**
Marek Lindner75874052015-08-04 21:09:57 +0800674 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
675 * @seq: neighbour table seq_file struct
676 * @offset: not used
677 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200678 * Return: always 0
Marek Lindner75874052015-08-04 21:09:57 +0800679 */
680int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
681{
682 struct net_device *net_dev = (struct net_device *)seq->private;
683 struct batadv_priv *bat_priv = netdev_priv(net_dev);
684 struct batadv_hard_iface *primary_if;
685
686 primary_if = batadv_seq_print_text_primary_if_get(seq);
687 if (!primary_if)
688 return 0;
689
690 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
691 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
692 primary_if->net_dev->dev_addr, net_dev->name,
693 bat_priv->bat_algo_ops->name);
694
695 batadv_hardif_free_ref(primary_if);
696
697 if (!bat_priv->bat_algo_ops->bat_neigh_print) {
698 seq_puts(seq,
699 "No printing function for this routing protocol\n");
700 return 0;
701 }
702
703 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
704 return 0;
705}
706
707/**
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100708 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
709 * free after rcu grace period
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100710 * @ref: kref pointer of the orig_ifinfo
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100711 */
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100712static void batadv_orig_ifinfo_release(struct kref *ref)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100713{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100714 struct batadv_orig_ifinfo *orig_ifinfo;
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100715 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100716
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100717 orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
718
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100719 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100720 batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100721
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100722 /* this is the last reference to this object */
723 router = rcu_dereference_protected(orig_ifinfo->router, true);
724 if (router)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100725 batadv_neigh_node_free_ref(router);
726
727 kfree_rcu(orig_ifinfo, rcu);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100728}
729
730/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100731 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100732 * the orig_ifinfo
733 * @orig_ifinfo: the orig_ifinfo object to release
734 */
735void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
736{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100737 kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100738}
739
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100740/**
741 * batadv_orig_node_free_rcu - free the orig_node
742 * @rcu: rcu pointer of the orig_node
743 */
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200744static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000745{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200746 struct batadv_orig_node *orig_node;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000747
Sven Eckelmann56303d32012-06-05 22:31:31 +0200748 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000749
Linus Lüssing60432d72014-02-15 17:47:51 +0100750 batadv_mcast_purge_orig(orig_node);
751
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200752 batadv_frag_purge_orig(orig_node, NULL);
753
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200754 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
755 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
756
Antonio Quartullia73105b2011-04-27 14:27:44 +0200757 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000758 kfree(orig_node);
759}
760
Linus Lüssing72822222013-04-15 21:43:29 +0800761/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100762 * batadv_orig_node_release - release orig_node from lists and queue for
763 * free after rcu grace period
764 * @orig_node: the orig node to free
765 */
766static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
767{
768 struct hlist_node *node_tmp;
769 struct batadv_neigh_node *neigh_node;
770 struct batadv_orig_ifinfo *orig_ifinfo;
771
772 spin_lock_bh(&orig_node->neigh_list_lock);
773
774 /* for all neighbors towards this originator ... */
775 hlist_for_each_entry_safe(neigh_node, node_tmp,
776 &orig_node->neigh_list, list) {
777 hlist_del_rcu(&neigh_node->list);
778 batadv_neigh_node_free_ref(neigh_node);
779 }
780
781 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
782 &orig_node->ifinfo_list, list) {
783 hlist_del_rcu(&orig_ifinfo->list);
784 batadv_orig_ifinfo_free_ref(orig_ifinfo);
785 }
786 spin_unlock_bh(&orig_node->neigh_list_lock);
787
788 /* Free nc_nodes */
789 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
790
791 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
792}
793
794/**
Linus Lüssing72822222013-04-15 21:43:29 +0800795 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100796 * release it
Linus Lüssing72822222013-04-15 21:43:29 +0800797 * @orig_node: the orig node to free
798 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200799void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000800{
801 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100802 batadv_orig_node_release(orig_node);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000803}
804
Sven Eckelmann56303d32012-06-05 22:31:31 +0200805void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000806{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200807 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800808 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000809 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000810 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200811 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200812 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000813
814 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000815 return;
816
817 cancel_delayed_work_sync(&bat_priv->orig_work);
818
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000819 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000820
821 for (i = 0; i < hash->size; i++) {
822 head = &hash->table[i];
823 list_lock = &hash->list_locks[i];
824
825 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800826 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000827 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800828 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200829 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000830 }
831 spin_unlock_bh(list_lock);
832 }
833
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200834 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000835}
836
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200837/**
838 * batadv_orig_node_new - creates a new orig_node
839 * @bat_priv: the bat priv with all the soft interface information
840 * @addr: the mac address of the originator
841 *
 842 * Creates a new originator object and initialises all the generic fields.
843 * The new object is not added to the originator list.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200844 *
845 * Return: the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200846 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200847struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200848 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000849{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200850 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200851 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b0442012-06-03 22:19:17 +0200852 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200853 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000854
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200855 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
856 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000857
Sven Eckelmann704509b2011-05-14 23:14:54 +0200858 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000859 if (!orig_node)
860 return NULL;
861
Marek Lindner9591a792010-12-12 21:57:11 +0000862 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800863 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100864 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000865 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000866 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200867 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200868 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200869 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000870
Martin Hundebølld56b1702013-01-25 11:12:39 +0100871 batadv_nc_init_orig(orig_node);
872
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000873 /* extra reference for return */
874 atomic_set(&orig_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000875
Marek Lindner16b1aba2011-01-19 20:01:42 +0000876 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100877 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100878 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200879 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200880 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200881 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +0100882 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b0442012-06-03 22:19:17 +0200883 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
884 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200885
Linus Lüssing60432d72014-02-15 17:47:51 +0100886#ifdef CONFIG_BATMAN_ADV_MCAST
887 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200888 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
889 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
890 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
891 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +0100892#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000893
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200894 /* create a vlan object for the "untagged" LAN */
895 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
896 if (!vlan)
897 goto free_orig_node;
898 /* batadv_orig_node_vlan_new() increases the refcounter.
899 * Immediately release vlan since it is not needed anymore in this
900 * context
901 */
902 batadv_orig_node_vlan_free_ref(vlan);
903
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200904 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
905 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
906 spin_lock_init(&orig_node->fragments[i].lock);
907 orig_node->fragments[i].size = 0;
908 }
909
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000910 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000911free_orig_node:
912 kfree(orig_node);
913 return NULL;
914}
915
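/* Illustrative sketch (not part of the original file): batadv_orig_node_new()
 * only allocates and initialises the object; the routing algorithm is
 * expected to insert it into bat_priv->orig_hash and to release the
 * references itself. batadv_hash_add() and batadv_choose_orig() are assumed
 * helpers from hash.h/originator.h; the double free_ref on the error path
 * matches the initial refcount of 2 set above.
 */
static struct batadv_orig_node *
example_orig_create_and_add(struct batadv_priv *bat_priv, const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	int hash_added;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return NULL;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0) {
		/* one reference for the hash, one for the caller */
		batadv_orig_node_free_ref(orig_node);
		batadv_orig_node_free_ref(orig_node);
		return NULL;
	}

	return orig_node;
}
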
Simon Wunderlich89652332013-11-13 19:14:46 +0100916/**
Simon Wunderlich709de132014-03-26 15:46:24 +0100917 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
918 * @bat_priv: the bat priv with all the soft interface information
 919 * @neigh: the neighbor node which is to be checked
920 */
921static void
922batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
923 struct batadv_neigh_node *neigh)
924{
925 struct batadv_neigh_ifinfo *neigh_ifinfo;
926 struct batadv_hard_iface *if_outgoing;
927 struct hlist_node *node_tmp;
928
929 spin_lock_bh(&neigh->ifinfo_lock);
930
 931 /* for all ifinfo objects for this neighbor */
932 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
933 &neigh->ifinfo_list, list) {
934 if_outgoing = neigh_ifinfo->if_outgoing;
935
936 /* always keep the default interface */
937 if (if_outgoing == BATADV_IF_DEFAULT)
938 continue;
939
940 /* don't purge if the interface is not (going) down */
941 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
942 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
943 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
944 continue;
945
946 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
947 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
948 neigh->addr, if_outgoing->net_dev->name);
949
950 hlist_del_rcu(&neigh_ifinfo->list);
951 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
952 }
953
954 spin_unlock_bh(&neigh->ifinfo_lock);
955}
956
957/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100958 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
959 * @bat_priv: the bat priv with all the soft interface information
960 * @orig_node: orig node which is to be checked
961 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200962 * Return: true if any ifinfo entry was purged, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100963 */
964static bool
965batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
966 struct batadv_orig_node *orig_node)
967{
968 struct batadv_orig_ifinfo *orig_ifinfo;
969 struct batadv_hard_iface *if_outgoing;
970 struct hlist_node *node_tmp;
971 bool ifinfo_purged = false;
972
973 spin_lock_bh(&orig_node->neigh_list_lock);
974
975 /* for all ifinfo objects for this originator */
976 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
977 &orig_node->ifinfo_list, list) {
978 if_outgoing = orig_ifinfo->if_outgoing;
979
980 /* always keep the default interface */
981 if (if_outgoing == BATADV_IF_DEFAULT)
982 continue;
983
984 /* don't purge if the interface is not (going) down */
985 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
986 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
987 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
988 continue;
989
990 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
991 "router/ifinfo purge: originator %pM, iface: %s\n",
992 orig_node->orig, if_outgoing->net_dev->name);
993
994 ifinfo_purged = true;
995
996 hlist_del_rcu(&orig_ifinfo->list);
997 batadv_orig_ifinfo_free_ref(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +0100998 if (orig_node->last_bonding_candidate == orig_ifinfo) {
999 orig_node->last_bonding_candidate = NULL;
1000 batadv_orig_ifinfo_free_ref(orig_ifinfo);
1001 }
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001002 }
1003
1004 spin_unlock_bh(&orig_node->neigh_list_lock);
1005
1006 return ifinfo_purged;
1007}
1008
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001009/**
Simon Wunderlich89652332013-11-13 19:14:46 +01001010 * batadv_purge_orig_neighbors - purges neighbors from originator
1011 * @bat_priv: the bat priv with all the soft interface information
1012 * @orig_node: orig node which is to be checked
1013 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001014 * Return: true if any neighbor was purged, false otherwise
Simon Wunderlich89652332013-11-13 19:14:46 +01001015 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001016static bool
1017batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +01001018 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001019{
Sasha Levinb67bfe02013-02-27 17:06:00 -08001020 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001021 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001022 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +08001023 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001024 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001025
Marek Lindnerf987ed62010-12-12 21:57:12 +00001026 spin_lock_bh(&orig_node->neigh_list_lock);
1027
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001028 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001029 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +00001030 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001031 last_seen = neigh_node->last_seen;
1032 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001033
Sven Eckelmann42d0b0442012-06-03 22:19:17 +02001034 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001035 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1036 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1037 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001038 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1039 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1040 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001041 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001042 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1043 orig_node->orig, neigh_node->addr,
1044 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001045 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001046 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001047 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1048 orig_node->orig, neigh_node->addr,
1049 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001050
1051 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +00001052
Marek Lindnerf987ed62010-12-12 21:57:12 +00001053 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001054 batadv_neigh_node_free_ref(neigh_node);
Simon Wunderlich709de132014-03-26 15:46:24 +01001055 } else {
1056 /* only necessary if not the whole neighbor is to be
1057 * deleted, but some interface has been removed.
1058 */
1059 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001060 }
1061 }
Marek Lindnerf987ed62010-12-12 21:57:12 +00001062
1063 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001064 return neigh_purged;
1065}
1066
Simon Wunderlich89652332013-11-13 19:14:46 +01001067/**
1068 * batadv_find_best_neighbor - finds the best neighbor after purging
1069 * @bat_priv: the bat priv with all the soft interface information
1070 * @orig_node: orig node which is to be checked
1071 * @if_outgoing: the interface for which the metric should be compared
1072 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001073 * Return: the current best neighbor, with refcount increased.
Simon Wunderlich89652332013-11-13 19:14:46 +01001074 */
1075static struct batadv_neigh_node *
1076batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1077 struct batadv_orig_node *orig_node,
1078 struct batadv_hard_iface *if_outgoing)
1079{
1080 struct batadv_neigh_node *best = NULL, *neigh;
1081 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1082
1083 rcu_read_lock();
1084 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1085 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1086 best, if_outgoing) <= 0))
1087 continue;
1088
Sven Eckelmann77ae32e2016-01-16 10:29:53 +01001089 if (!kref_get_unless_zero(&neigh->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +01001090 continue;
1091
1092 if (best)
1093 batadv_neigh_node_free_ref(best);
1094
1095 best = neigh;
1096 }
1097 rcu_read_unlock();
1098
1099 return best;
1100}
1101
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001102/**
1103 * batadv_purge_orig_node - purges obsolete information from an orig_node
1104 * @bat_priv: the bat priv with all the soft interface information
1105 * @orig_node: orig node which is to be checked
1106 *
1107 * This function checks if the orig_node or substructures of it have become
1108 * obsolete, and purges this information if that's the case.
1109 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001110 * Return: true if the orig_node is to be removed, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001111 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001112static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1113 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001114{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001115 struct batadv_neigh_node *best_neigh_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001116 struct batadv_hard_iface *hard_iface;
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001117 bool changed_ifinfo, changed_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001118
Sven Eckelmann42d0b0442012-06-03 22:19:17 +02001119 if (batadv_has_timed_out(orig_node->last_seen,
1120 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001121 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001122 "Originator timeout: originator %pM, last_seen %u\n",
1123 orig_node->orig,
1124 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001125 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001126 }
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001127 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1128 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001129
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001130 if (!changed_ifinfo && !changed_neigh)
Simon Wunderlich89652332013-11-13 19:14:46 +01001131 return false;
1132
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001133 /* first for NULL ... */
Simon Wunderlich89652332013-11-13 19:14:46 +01001134 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1135 BATADV_IF_DEFAULT);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001136 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1137 best_neigh_node);
Simon Wunderlich89652332013-11-13 19:14:46 +01001138 if (best_neigh_node)
1139 batadv_neigh_node_free_ref(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001140
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001141 /* ... then for all other interfaces. */
1142 rcu_read_lock();
1143 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1144 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1145 continue;
1146
1147 if (hard_iface->soft_iface != bat_priv->soft_iface)
1148 continue;
1149
1150 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1151 orig_node,
1152 hard_iface);
1153 batadv_update_route(bat_priv, orig_node, hard_iface,
1154 best_neigh_node);
1155 if (best_neigh_node)
1156 batadv_neigh_node_free_ref(best_neigh_node);
1157 }
1158 rcu_read_unlock();
1159
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001160 return false;
1161}
1162
Sven Eckelmann56303d32012-06-05 22:31:31 +02001163static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001164{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001165 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001166 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001167 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001168 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001169 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001170 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001171
1172 if (!hash)
1173 return;
1174
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001175 /* for all origins... */
1176 for (i = 0; i < hash->size; i++) {
1177 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001178 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001179
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001180 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001181 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001182 head, hash_entry) {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001183 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001184 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001185 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001186 batadv_tt_global_del_orig(orig_node->bat_priv,
1187 orig_node, -1,
1188 "originator timed out");
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001189 batadv_orig_node_free_ref(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001190 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001191 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001192
1193 batadv_frag_purge_orig(orig_node,
1194 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001195 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001196 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001197 }
1198
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001199 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001200}
1201
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

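/**
 * batadv_purge_orig_ref - immediately purge all outdated originators
 * @bat_priv: the bat priv with all the soft interface information
 */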
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

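/**
 * batadv_orig_seq_print_text - writes originator infos for the default
 *  outgoing interface (BATADV_IF_DEFAULT)
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */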
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

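/**
 * batadv_orig_hash_add_if - resize per-originator data for an added interface
 * @hard_iface: the hard interface which was added
 * @max_if_num: the new total number of interfaces
 *
 * Asks the active routing algorithm to grow the per interface state of every
 * originator (e.g. orig_node->bcast_own) to the new interface count.
 *
 * Return: 0 on success, -ENOMEM if resizing an originator failed
 */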
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

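/**
 * batadv_orig_hash_del_if - resize per-originator data for a removed interface
 * @hard_iface: the hard interface which is being removed
 * @max_if_num: the new total number of interfaces
 *
 * Asks the active routing algorithm to shrink the per interface state of every
 * originator and renumbers the remaining interfaces of the same soft
 * interface.
 *
 * Return: 0 on success, -ENOMEM if resizing an originator failed
 */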
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}