6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
24 u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
25 /* Each xfrm_state may be linked to two tables:
27 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
28 2. Hash table by daddr to find what SAs exist for given
29 destination/tunnel endpoint. (output)
32 static DEFINE_SPINLOCK(xfrm_state_lock);
34 /* Hash table to find appropriate SA towards given target (endpoint
35 * of tunnel or destination of transport mode) allowed by selector.
37 * Main use is finding SA after policy selected tunnel or transport mode.
38 * Also, it can be used by ah/esp icmp error handler to find offending SA.
40 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
41 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
43 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
44 EXPORT_SYMBOL(km_waitq);
46 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
47 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
49 static struct work_struct xfrm_state_gc_work;
50 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
51 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
53 static int xfrm_state_gc_flush_bundles;
55 static int __xfrm_state_delete(struct xfrm_state *x);
57 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
58 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
60 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
61 static void km_state_expired(struct xfrm_state *x, int hard);
/*
 * Final teardown of a dead xfrm_state: cancel both timers, run the
 * protocol type's destructor, drop the type reference and free the
 * security context.
 * NOTE(review): listing is elided (original line numbers jump); lines
 * between the fragments below are missing from this view.
 */
63 static void xfrm_state_gc_destroy(struct xfrm_state *x)
65 if (del_timer(&x->timer))
67 if (del_timer(&x->rtimer))
74 x->type->destructor(x);
75 xfrm_put_type(x->type);
77 security_xfrm_state_free(x);
/*
 * Deferred-work handler: destroy every state queued on
 * xfrm_state_gc_list.  The list is spliced onto a private list under
 * xfrm_state_gc_lock so destruction runs without the lock held.
 */
81 static void xfrm_state_gc_task(void *data)
84 struct list_head *entry, *tmp;
85 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
/* A delete left dangling DST bundles behind; flush them first. */
87 if (xfrm_state_gc_flush_bundles) {
88 xfrm_state_gc_flush_bundles = 0;
/* Grab the whole pending list atomically, then work lock-free. */
92 spin_lock_bh(&xfrm_state_gc_lock);
93 list_splice_init(&xfrm_state_gc_list, &gc_list);
94 spin_unlock_bh(&xfrm_state_gc_lock);
/* _safe iteration: each entry is destroyed as we walk. */
96 list_for_each_safe(entry, tmp, &gc_list) {
97 x = list_entry(entry, struct xfrm_state, bydst);
98 xfrm_state_gc_destroy(x);
/*
 * Convert a timeout in seconds to jiffies, clamped below
 * MAX_SCHEDULE_TIMEOUT so the multiplication by HZ cannot overflow.
 * NOTE(review): the non-clamped branch is elided from this listing.
 */
103 static inline unsigned long make_jiffies(long secs)
105 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
106 return MAX_SCHEDULE_TIMEOUT-1;
/*
 * Per-state lifetime timer.  Checks the four second-based lifetime
 * limits (hard/soft x add/use), fires soft expiry notifications,
 * re-arms the timer for the nearest future deadline, and deletes the
 * state on hard expiry.  Runs with x->lock held (unlock visible at the
 * end; the matching lock is in an elided line).
 */
111 static void xfrm_timer_handler(unsigned long data)
113 struct xfrm_state *x = (struct xfrm_state*)data;
/* Lifetimes are tracked in wall-clock seconds, not jiffies. */
114 unsigned long now = (unsigned long)xtime.tv_sec;
115 long next = LONG_MAX;
119 if (x->km.state == XFRM_STATE_DEAD)
121 if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard limits: a non-positive tmo means the state must expire now. */
123 if (x->lft.hard_add_expires_seconds) {
124 long tmo = x->lft.hard_add_expires_seconds +
125 x->curlft.add_time - now;
131 if (x->lft.hard_use_expires_seconds) {
/* use_time of 0 means "never used yet"; treat as now (gcc ?: ext). */
132 long tmo = x->lft.hard_use_expires_seconds +
133 (x->curlft.use_time ? : now) - now;
/* Soft limits only warn the key manager (km.dying / km_state_expired). */
141 if (x->lft.soft_add_expires_seconds) {
142 long tmo = x->lft.soft_add_expires_seconds +
143 x->curlft.add_time - now;
149 if (x->lft.soft_use_expires_seconds) {
150 long tmo = x->lft.soft_use_expires_seconds +
151 (x->curlft.use_time ? : now) - now;
/* hard=0: soft expiry notification only. */
160 km_state_expired(x, 0);
/* Re-arm for the earliest pending deadline; mod_timer()==0 means the
 * timer was inactive, so take an extra reference (hold in elided line).
 */
162 if (next != LONG_MAX &&
163 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI just dies quietly. */
168 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
169 x->km.state = XFRM_STATE_EXPIRED;
/* Hard expiry: delete, and notify (hard=1) if it was a real SA. */
174 if (!__xfrm_state_delete(x) && x->id.spi)
175 km_state_expired(x, 1);
178 spin_unlock(&x->lock);
/*
 * Allocate and initialise a new xfrm_state with refcount 1, both
 * timers set up, infinite byte/packet lifetime limits and add_time
 * stamped from wall-clock time.  GFP_ATOMIC: callable from softirq
 * context.  Returns NULL on allocation failure (elided branch).
 */
182 struct xfrm_state *xfrm_state_alloc(void)
184 struct xfrm_state *x;
186 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
189 memset(x, 0, sizeof(struct xfrm_state));
190 atomic_set(&x->refcnt, 1);
191 atomic_set(&x->tunnel_users, 0);
192 INIT_LIST_HEAD(&x->bydst);
193 INIT_LIST_HEAD(&x->byspi);
/* Lifetime timer and replay-notification timer both carry x as data. */
194 init_timer(&x->timer);
195 x->timer.function = xfrm_timer_handler;
196 x->timer.data = (unsigned long)x;
197 init_timer(&x->rtimer);
198 x->rtimer.function = xfrm_replay_timer_handler;
199 x->rtimer.data = (unsigned long)x;
200 x->curlft.add_time = (unsigned long)xtime.tv_sec;
/* XFRM_INF == no limit; only second-based limits default to 0/off. */
201 x->lft.soft_byte_limit = XFRM_INF;
202 x->lft.soft_packet_limit = XFRM_INF;
203 x->lft.hard_byte_limit = XFRM_INF;
204 x->lft.hard_packet_limit = XFRM_INF;
205 x->replay_maxage = 0;
206 x->replay_maxdiff = 0;
207 spin_lock_init(&x->lock);
211 EXPORT_SYMBOL(xfrm_state_alloc);
/*
 * Last reference dropped: queue the (already DEAD) state on the GC
 * list and kick the GC work.  Actual destruction is deferred to
 * xfrm_state_gc_task; the bydst list_head is reused as the GC link.
 */
213 void __xfrm_state_destroy(struct xfrm_state *x)
215 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
217 spin_lock_bh(&xfrm_state_gc_lock);
218 list_add(&x->bydst, &xfrm_state_gc_list);
219 spin_unlock_bh(&xfrm_state_gc_lock);
220 schedule_work(&xfrm_state_gc_work);
222 EXPORT_SYMBOL(__xfrm_state_destroy);
/*
 * Mark a state DEAD and unhook it: remove from the hash tables (elided
 * lines under xfrm_state_lock), stop both timers, and drop the
 * reference that xfrm_state_alloc() created.  Caller holds x->lock.
 * Return value (elided) reports whether it was already dead.
 */
224 static int __xfrm_state_delete(struct xfrm_state *x)
228 if (x->km.state != XFRM_STATE_DEAD) {
229 x->km.state = XFRM_STATE_DEAD;
230 spin_lock(&xfrm_state_lock);
237 spin_unlock(&xfrm_state_lock);
/* del_timer()!=0 means the timer held a reference; drop it (elided). */
238 if (del_timer(&x->timer))
240 if (del_timer(&x->rtimer))
243 /* The number two in this test is the reference
244 * mentioned in the comment below plus the reference
245 * our caller holds. A larger value means that
246 * there are DSTs attached to this xfrm_state.
248 if (atomic_read(&x->refcnt) > 2) {
/* Let the GC worker flush those DST bundles asynchronously. */
249 xfrm_state_gc_flush_bundles = 1;
250 schedule_work(&xfrm_state_gc_work);
253 /* All xfrm_state objects are created by xfrm_state_alloc.
254 * The xfrm_state_alloc call gives a reference, and that
255 * is what we are dropping here.
/*
 * Public wrapper: delete a state with the proper per-state locking
 * (BH-safe).  Return value propagates __xfrm_state_delete's result.
 */
264 int xfrm_state_delete(struct xfrm_state *x)
268 spin_lock_bh(&x->lock);
269 err = __xfrm_state_delete(x);
270 spin_unlock_bh(&x->lock);
274 EXPORT_SYMBOL(xfrm_state_delete);
/*
 * Delete every non-kernel-owned state matching proto (or all states
 * for IPSEC_PROTO_ANY).  The table lock is dropped around each
 * xfrm_state_delete() call and the bucket is rescanned afterwards
 * (elided restart logic), since deletion sleeps BH-wise on x->lock.
 */
276 void xfrm_state_flush(u8 proto)
279 struct xfrm_state *x;
281 spin_lock_bh(&xfrm_state_lock);
282 for (i = 0; i < XFRM_DST_HSIZE; i++) {
284 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
/* Kernel-internal (tunnel-owner) states are never flushed here. */
285 if (!xfrm_state_kern(x) &&
286 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
288 spin_unlock_bh(&xfrm_state_lock);
290 xfrm_state_delete(x);
293 spin_lock_bh(&xfrm_state_lock);
298 spin_unlock_bh(&xfrm_state_lock);
301 EXPORT_SYMBOL(xfrm_state_flush);
/*
 * Fill a temporary selector on an ACQ state, matching only the flow
 * that triggered resolution.  Delegates to the per-family afinfo hook;
 * error handling for a missing afinfo is in an elided line.
 */
304 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
305 struct xfrm_tmpl *tmpl,
306 xfrm_address_t *daddr, xfrm_address_t *saddr,
307 unsigned short family)
309 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
312 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
313 xfrm_state_put_afinfo(afinfo);
/*
 * Core SA resolution for output: find the best VALID state matching
 * template/flow/policy in the bydst hash, or — if none exists and no
 * acquisition is already in flight — create an ACQ placeholder state
 * and ask the key manager(s) to negotiate a real SA (km_query).
 * Returns the chosen/created state; *err carries -EAGAIN while an
 * acquire is pending.  Heavily elided in this listing.
 */
318 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
319 struct flowi *fl, struct xfrm_tmpl *tmpl,
320 struct xfrm_policy *pol, int *err,
321 unsigned short family)
323 unsigned h = xfrm_dst_hash(daddr, family);
324 struct xfrm_state *x, *x0;
325 int acquire_in_progress = 0;
327 struct xfrm_state *best = NULL;
328 struct xfrm_state_afinfo *afinfo;
330 afinfo = xfrm_state_get_afinfo(family);
331 if (afinfo == NULL) {
332 *err = -EAFNOSUPPORT;
336 spin_lock_bh(&xfrm_state_lock);
337 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
/* Candidate must match family, reqid, addresses, mode, proto and
 * (when the template pins one) the SPI.
 */
338 if (x->props.family == family &&
339 x->props.reqid == tmpl->reqid &&
340 xfrm_state_addr_check(x, daddr, saddr, family) &&
341 tmpl->mode == x->props.mode &&
342 tmpl->id.proto == x->id.proto &&
343 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
345 1. There is a valid state with matching selector.
347 2. Valid state with inappropriate selector. Skip.
349 Entering area of "sysdeps".
351 3. If state is not valid, selector is temporary,
352 it selects only session which triggered
353 previous resolution. Key manager will do
354 something to install a state with proper
/* Among VALID matches prefer the least-dying, then newest state. */
357 if (x->km.state == XFRM_STATE_VALID) {
358 if (!xfrm_selector_match(&x->sel, fl, family) ||
359 !xfrm_sec_ctx_match(pol->security, x->security))
362 best->km.dying > x->km.dying ||
363 (best->km.dying == x->km.dying &&
364 best->curlft.add_time < x->curlft.add_time))
366 } else if (x->km.state == XFRM_STATE_ACQ) {
367 acquire_in_progress = 1;
368 } else if (x->km.state == XFRM_STATE_ERROR ||
369 x->km.state == XFRM_STATE_EXPIRED) {
370 if (xfrm_selector_match(&x->sel, fl, family) &&
371 xfrm_sec_ctx_match(pol->security, x->security))
/* No usable state: try an exact SPI lookup, then build an ACQ stub. */
378 if (!x && !error && !acquire_in_progress) {
380 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
381 tmpl->id.proto)) != NULL) {
386 x = xfrm_state_alloc();
391 /* Initialize temporary selector matching only
392 * to current session. */
393 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
/* Key manager accepted the query: park the ACQ state in both hash
 * tables and arm an XFRM_ACQ_EXPIRES timeout for the negotiation.
 */
395 if (km_query(x, tmpl, pol) == 0) {
396 x->km.state = XFRM_STATE_ACQ;
397 list_add_tail(&x->bydst, xfrm_state_bydst+h);
400 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
401 list_add(&x->byspi, xfrm_state_byspi+h);
404 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
406 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
407 add_timer(&x->timer);
/* km_query failed: the stub is stillborn. */
409 x->km.state = XFRM_STATE_DEAD;
419 *err = acquire_in_progress ? -EAGAIN : error;
420 spin_unlock_bh(&xfrm_state_lock);
421 xfrm_state_put_afinfo(afinfo);
/*
 * Link a state into both hash tables (bydst always; byspi presumably
 * gated on a non-zero SPI in an elided line), then arm the lifetime
 * timer one second out and the replay timer if configured.  Caller
 * holds xfrm_state_lock.
 */
425 static void __xfrm_state_insert(struct xfrm_state *x)
427 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
429 list_add(&x->bydst, xfrm_state_bydst+h);
432 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
434 list_add(&x->byspi, xfrm_state_byspi+h);
/* mod_timer()==0: timer was idle, so grab a reference (elided). */
437 if (!mod_timer(&x->timer, jiffies + HZ))
440 if (x->replay_maxage &&
441 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
/*
 * Public insert: take the table lock, link the state, then invalidate
 * cached bundles so the new SA is picked up by existing flows.
 */
447 void xfrm_state_insert(struct xfrm_state *x)
449 spin_lock_bh(&xfrm_state_lock);
450 __xfrm_state_insert(x);
451 spin_unlock_bh(&xfrm_state_lock);
453 xfrm_flush_all_bundles();
455 EXPORT_SYMBOL(xfrm_state_insert);
457 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/*
 * Add a fully-specified SA.  Fails (elided -EEXIST path) if an SA with
 * the same (daddr, spi, proto) already exists; otherwise replaces any
 * matching ACQ placeholder — found by km seq or by find_acq — inserts
 * the new state, flushes bundles, and deletes the placeholder.
 */
459 int xfrm_state_add(struct xfrm_state *x)
461 struct xfrm_state_afinfo *afinfo;
462 struct xfrm_state *x1;
466 family = x->props.family;
467 afinfo = xfrm_state_get_afinfo(family);
468 if (unlikely(afinfo == NULL))
469 return -EAFNOSUPPORT;
471 spin_lock_bh(&xfrm_state_lock);
/* Duplicate check against existing SAs. */
473 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* Prefer the ACQ stub that triggered this add (same km.seq) ... */
482 x1 = __xfrm_find_acq_byseq(x->km.seq);
483 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
/* ... falling back to a (mode, reqid, proto, addrs) acquire lookup. */
490 x1 = afinfo->find_acq(
491 x->props.mode, x->props.reqid, x->id.proto,
492 &x->id.daddr, &x->props.saddr, 0);
494 __xfrm_state_insert(x);
498 spin_unlock_bh(&xfrm_state_lock);
499 xfrm_state_put_afinfo(afinfo);
502 xfrm_flush_all_bundles();
/* Retire the placeholder the new SA supersedes. */
505 xfrm_state_delete(x1);
511 EXPORT_SYMBOL(xfrm_state_add);
/*
 * SADB_UPDATE semantics: locate the existing SA by (daddr, spi,
 * proto).  Kernel-owned SAs cannot be updated.  If the match is an
 * ACQ placeholder, insert the new state and delete the stub; if it is
 * VALID, copy encap and lifetime parameters into it in place and
 * re-check expiry.  Several error/return paths are elided.
 */
513 int xfrm_state_update(struct xfrm_state *x)
515 struct xfrm_state_afinfo *afinfo;
516 struct xfrm_state *x1;
519 afinfo = xfrm_state_get_afinfo(x->props.family);
520 if (unlikely(afinfo == NULL))
521 return -EAFNOSUPPORT;
523 spin_lock_bh(&xfrm_state_lock);
524 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
530 if (xfrm_state_kern(x1)) {
536 if (x1->km.state == XFRM_STATE_ACQ) {
537 __xfrm_state_insert(x);
543 spin_unlock_bh(&xfrm_state_lock);
544 xfrm_state_put_afinfo(afinfo);
550 xfrm_state_delete(x1);
/* In-place update of a live SA under its own lock. */
556 spin_lock_bh(&x1->lock);
557 if (likely(x1->km.state == XFRM_STATE_VALID)) {
558 if (x->encap && x1->encap)
559 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
560 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
/* New lifetime may already be exceeded; re-arm timer and re-check. */
563 if (!mod_timer(&x1->timer, jiffies + HZ))
565 if (x1->curlft.use_time)
566 xfrm_state_check_expire(x1);
570 spin_unlock_bh(&x1->lock);
576 EXPORT_SYMBOL(xfrm_state_update);
/*
 * Datapath lifetime check: stamp first-use time, then enforce hard
 * byte/packet limits (state -> EXPIRED, timer fired immediately) and
 * signal soft limits to the key manager via km_state_expired(x, 0).
 * Return values are in elided lines.
 */
578 int xfrm_state_check_expire(struct xfrm_state *x)
580 if (!x->curlft.use_time)
581 x->curlft.use_time = (unsigned long)xtime.tv_sec;
583 if (x->km.state != XFRM_STATE_VALID)
/* Hard limit reached: expire now; mod_timer()==0 -> take ref (elided). */
586 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
587 x->curlft.packets >= x->lft.hard_packet_limit) {
588 x->km.state = XFRM_STATE_EXPIRED;
589 if (!mod_timer(&x->timer, jiffies))
/* Soft limit: warn once (dying flag check is in an elided line). */
595 (x->curlft.bytes >= x->lft.soft_byte_limit ||
596 x->curlft.packets >= x->lft.soft_packet_limit)) {
598 km_state_expired(x, 0);
602 EXPORT_SYMBOL(xfrm_state_check_expire);
/*
 * Ensure the skb has enough headroom for this transform's header plus
 * the output device's link-layer reserve, expanding the head if not.
 */
604 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
606 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
610 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
612 /* Check tail too... */
/*
 * Combined output-path check: lifetime expiry first, then skb
 * headroom.  Returns the first error encountered.
 */
616 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
618 int err = xfrm_state_check_expire(x);
621 err = xfrm_state_check_space(x, skb);
625 EXPORT_SYMBOL(xfrm_state_check);
/*
 * Look up an SA by (daddr, spi, proto) via the per-family afinfo hook,
 * under the table lock.  The lookup is expected to take a reference on
 * the returned state (done inside the afinfo implementation).
 */
628 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
629 unsigned short family)
631 struct xfrm_state *x;
632 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
636 spin_lock_bh(&xfrm_state_lock);
637 x = afinfo->state_lookup(daddr, spi, proto);
638 spin_unlock_bh(&xfrm_state_lock);
639 xfrm_state_put_afinfo(afinfo);
642 EXPORT_SYMBOL(xfrm_state_lookup);
/*
 * Find (or, when create != 0, create) an ACQ placeholder state for the
 * given mode/reqid/proto/address pair, via the per-family hook.
 */
645 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
646 xfrm_address_t *daddr, xfrm_address_t *saddr,
647 int create, unsigned short family)
649 struct xfrm_state *x;
650 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
654 spin_lock_bh(&xfrm_state_lock);
655 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
656 spin_unlock_bh(&xfrm_state_lock);
657 xfrm_state_put_afinfo(afinfo);
660 EXPORT_SYMBOL(xfrm_find_acq);
662 /* Silly enough, but I'm lazy to build resolution list */
/*
 * Linear scan of every bydst bucket for an ACQ state with the given
 * key-manager sequence number.  Caller holds xfrm_state_lock.
 */
664 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
667 struct xfrm_state *x;
669 for (i = 0; i < XFRM_DST_HSIZE; i++) {
670 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
671 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/*
 * Locked wrapper around __xfrm_find_acq_byseq().
 */
680 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
682 struct xfrm_state *x;
684 spin_lock_bh(&xfrm_state_lock);
685 x = __xfrm_find_acq_byseq(seq);
686 spin_unlock_bh(&xfrm_state_lock);
689 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/*
 * Hand out monotonically increasing, never-zero acquire sequence
 * numbers; the gcc "?:" extension skips the value 0 on wrap.
 */
691 u32 xfrm_get_acqseq(void)
695 static DEFINE_SPINLOCK(acqseq_lock);
697 spin_lock_bh(&acqseq_lock);
698 res = (++acqseq ? : ++acqseq);
699 spin_unlock_bh(&acqseq_lock);
702 EXPORT_SYMBOL(xfrm_get_acqseq);
/*
 * Pick a free SPI in [minspi, maxspi] for state x.  A degenerate range
 * (minspi == maxspi) is a direct availability probe; otherwise random
 * candidates are tried (loop bound equals the range size, so success
 * is probabilistic, not guaranteed).  On success the state is hashed
 * into the byspi table.  SPIs are handled in network byte order at the
 * API boundary, host order internally.
 */
705 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
708 struct xfrm_state *x0;
713 if (minspi == maxspi) {
714 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
722 minspi = ntohl(minspi);
723 maxspi = ntohl(maxspi);
724 for (h=0; h<maxspi-minspi+1; h++) {
725 spi = minspi + net_random()%(maxspi-minspi+1);
726 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
728 x->id.spi = htonl(spi);
/* Got an SPI: publish the state in the byspi hash. */
735 spin_lock_bh(&xfrm_state_lock);
736 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
737 list_add(&x->byspi, xfrm_state_byspi+h);
739 spin_unlock_bh(&xfrm_state_lock);
743 EXPORT_SYMBOL(xfrm_alloc_spi);
/*
 * Iterate all states matching proto, invoking func(x, index, data) for
 * each.  Two passes under the table lock: the first counts matches so
 * the callback receives a decreasing index (0 marks the last entry).
 */
745 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
749 struct xfrm_state *x;
753 spin_lock_bh(&xfrm_state_lock);
/* Pass 1: count matching states. */
754 for (i = 0; i < XFRM_DST_HSIZE; i++) {
755 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
756 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
/* Pass 2: deliver each match to the callback with a countdown index. */
765 for (i = 0; i < XFRM_DST_HSIZE; i++) {
766 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
767 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
769 err = func(x, --count, data);
775 spin_unlock_bh(&xfrm_state_lock);
778 EXPORT_SYMBOL(xfrm_state_walk);
/*
 * Send an XFRM_MSG_NEWAE replay-state notification to key managers
 * when sequence numbers have advanced far enough (UPDATE) or the aging
 * timer fired with pending changes (TIMEOUT).  Snapshot of the replay
 * state is kept in x->preplay to detect changes.
 */
781 void xfrm_replay_notify(struct xfrm_state *x, int event)
784 /* we send notify messages in case
785 * 1. we updated on of the sequence numbers, and the seqno difference
786 * is at least x->replay_maxdiff, in this case we also update the
787 * timeout of our timer function
788 * 2. if x->replay_maxage has elapsed since last update,
789 * and there were changes
791 * The state structure must be locked!
795 case XFRM_REPLAY_UPDATE:
/* Neither direction advanced by replay_maxdiff yet: suppress. */
796 if (x->replay_maxdiff &&
797 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
798 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))
803 case XFRM_REPLAY_TIMEOUT:
/* Nothing changed since the last snapshot: suppress. */
804 if ((x->replay.seq == x->preplay.seq) &&
805 (x->replay.bitmap == x->preplay.bitmap) &&
806 (x->replay.oseq == x->preplay.oseq))
/* Record the notified state and broadcast the aevent. */
812 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
813 c.event = XFRM_MSG_NEWAE;
814 c.data.aevent = event;
815 km_state_notify(x, &c);
/* Re-arm the aging timer; mod_timer()==0 -> take ref (elided). */
818 if (x->replay_maxage &&
819 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
/*
 * Replay aging timer: if aevents are enabled and the state is still
 * VALID, emit a TIMEOUT replay notification.  Runs under x->lock
 * (matching lock is in an elided line).
 */
824 static void xfrm_replay_timer_handler(unsigned long data)
826 struct xfrm_state *x = (struct xfrm_state*)data;
830 if (xfrm_aevent_is_on() && x->km.state == XFRM_STATE_VALID)
831 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
833 spin_unlock(&x->lock);
/*
 * Anti-replay acceptance test for an inbound sequence number:
 * reject 0, accept anything beyond the window head, and for in-window
 * values reject those outside the window or already seen in the
 * bitmap.  Error/return statements are partially elided.
 */
836 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
842 if (unlikely(seq == 0))
845 if (likely(seq > x->replay.seq))
848 diff = x->replay.seq - seq;
849 if (diff >= x->props.replay_window) {
850 x->stats.replay_window++;
/* Bit already set: this sequence number was seen before — replay. */
854 if (x->replay.bitmap & (1U << diff)) {
860 EXPORT_SYMBOL(xfrm_replay_check);
/*
 * Record an accepted sequence number: slide the window bitmap forward
 * for a new head (or reset it on a jump past the window), or set the
 * in-window bit, then emit an UPDATE aevent if enabled.
 */
862 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
868 if (seq > x->replay.seq) {
869 diff = seq - x->replay.seq;
870 if (diff < x->props.replay_window)
871 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
/* Jumped past the whole window: only the new head is marked. */
873 x->replay.bitmap = 1;
876 diff = x->replay.seq - seq;
877 x->replay.bitmap |= (1U << diff);
880 if (xfrm_aevent_is_on())
881 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
883 EXPORT_SYMBOL(xfrm_replay_advance);
885 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
886 static DEFINE_RWLOCK(xfrm_km_lock);
/*
 * Broadcast a policy event to every registered key manager that
 * implements notify_policy, under the km read lock.
 */
888 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
892 read_lock(&xfrm_km_lock);
893 list_for_each_entry(km, &xfrm_km_list, list)
894 if (km->notify_policy)
895 km->notify_policy(xp, dir, c);
896 read_unlock(&xfrm_km_lock);
/*
 * Broadcast a state event to every registered key manager (the
 * per-manager notify call is in an elided line).
 */
899 void km_state_notify(struct xfrm_state *x, struct km_event *c)
902 read_lock(&xfrm_km_lock);
903 list_for_each_entry(km, &xfrm_km_list, list)
906 read_unlock(&xfrm_km_lock);
909 EXPORT_SYMBOL(km_policy_notify);
910 EXPORT_SYMBOL(km_state_notify);
/*
 * Notify key managers that a state soft/hard-expired (hard flag is
 * stored into the km_event in an elided line) as XFRM_MSG_EXPIRE.
 */
912 void km_state_expired(struct xfrm_state *x, int hard)
917 c.event = XFRM_MSG_EXPIRE;
918 km_state_notify(x, &c);
925 * We send to all registered managers regardless of failure
926 * We are happy with one success
/*
 * Ask every key manager to acquire an SA for (x, tmpl, pol); one
 * successful acquire() clears the error (success bookkeeping is in
 * elided lines).  Default result is -EINVAL.
 */
928 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
930 int err = -EINVAL, acqret;
933 read_lock(&xfrm_km_lock);
934 list_for_each_entry(km, &xfrm_km_list, list) {
935 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
939 read_unlock(&xfrm_km_lock);
/*
 * Report a NAT-T address/port mapping change to key managers that
 * implement new_mapping (capability check is in an elided line).
 */
943 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
948 read_lock(&xfrm_km_lock);
949 list_for_each_entry(km, &xfrm_km_list, list) {
951 err = km->new_mapping(x, ipaddr, sport);
955 read_unlock(&xfrm_km_lock);
958 EXPORT_SYMBOL(km_new_mapping);
/*
 * Notify key managers of a policy expiry (hard flag stored into the
 * km_event in an elided line) as XFRM_MSG_POLEXPIRE.
 */
960 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
965 c.event = XFRM_MSG_POLEXPIRE;
966 km_policy_notify(pol, dir, &c);
/*
 * setsockopt path for per-socket IPsec policy: copy the user buffer
 * (bounded by PAGE_SIZE), let the first key manager that can compile
 * it produce an xfrm_policy, and install it on the socket.  Cleanup
 * and error returns are in elided lines.
 */
972 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
977 struct xfrm_policy *pol = NULL;
/* Reject empty or oversized option buffers before allocating. */
979 if (optlen <= 0 || optlen > PAGE_SIZE)
982 data = kmalloc(optlen, GFP_KERNEL);
987 if (copy_from_user(data, optval, optlen))
991 read_lock(&xfrm_km_lock);
992 list_for_each_entry(km, &xfrm_km_list, list) {
993 pol = km->compile_policy(sk->sk_family, optname, data,
998 read_unlock(&xfrm_km_lock);
1001 xfrm_sk_policy_insert(sk, err, pol);
1010 EXPORT_SYMBOL(xfrm_user_policy);
/*
 * Register a key manager: append to the global list under the
 * write lock.
 */
1012 int xfrm_register_km(struct xfrm_mgr *km)
1014 write_lock_bh(&xfrm_km_lock);
1015 list_add_tail(&km->list, &xfrm_km_list);
1016 write_unlock_bh(&xfrm_km_lock);
1019 EXPORT_SYMBOL(xfrm_register_km);
/*
 * Unregister a key manager: unlink from the global list under the
 * write lock.
 */
1021 int xfrm_unregister_km(struct xfrm_mgr *km)
1023 write_lock_bh(&xfrm_km_lock);
1024 list_del(&km->list);
1025 write_unlock_bh(&xfrm_km_lock);
1028 EXPORT_SYMBOL(xfrm_unregister_km);
/*
 * Register per-address-family state operations.  Rejects out-of-range
 * families and duplicate registrations (error value elided); on
 * success hands the afinfo pointers to the shared hash tables.
 */
1030 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1033 if (unlikely(afinfo == NULL))
1035 if (unlikely(afinfo->family >= NPROTO))
1036 return -EAFNOSUPPORT;
1037 write_lock(&xfrm_state_afinfo_lock);
1038 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
/* Share the global bydst/byspi tables with the family module. */
1041 afinfo->state_bydst = xfrm_state_bydst;
1042 afinfo->state_byspi = xfrm_state_byspi;
1043 xfrm_state_afinfo[afinfo->family] = afinfo;
1045 write_unlock(&xfrm_state_afinfo_lock);
1048 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/*
 * Unregister per-address-family state operations; only the afinfo that
 * is actually registered for its family may unregister, and its table
 * pointers are cleared on the way out.
 */
1050 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1053 if (unlikely(afinfo == NULL))
1055 if (unlikely(afinfo->family >= NPROTO))
1056 return -EAFNOSUPPORT;
1057 write_lock(&xfrm_state_afinfo_lock);
1058 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1059 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1062 xfrm_state_afinfo[afinfo->family] = NULL;
1063 afinfo->state_byspi = NULL;
1064 afinfo->state_bydst = NULL;
1067 write_unlock(&xfrm_state_afinfo_lock);
1070 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/*
 * Look up the afinfo for a family and pin it by taking its own read
 * lock before releasing the table lock — the afinfo stays valid until
 * xfrm_state_put_afinfo() releases it.  Returns NULL for unknown
 * families (return statement elided).
 */
1072 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1074 struct xfrm_state_afinfo *afinfo;
1075 if (unlikely(family >= NPROTO))
1077 read_lock(&xfrm_state_afinfo_lock);
1078 afinfo = xfrm_state_afinfo[family];
1079 if (likely(afinfo != NULL))
1080 read_lock(&afinfo->lock);
1081 read_unlock(&xfrm_state_afinfo_lock);
/*
 * Release the afinfo pin taken by xfrm_state_get_afinfo().
 * NULL-safe so callers can pass through a failed lookup.
 */
1085 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1087 if (unlikely(afinfo == NULL))
1089 read_unlock(&afinfo->lock);
1092 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/*
 * Drop a state's reference on its tunnel state; when this was the last
 * user (count 2: the tunnel's own ref plus ours), delete the tunnel
 * SA as well.
 */
1093 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1096 struct xfrm_state *t = x->tunnel;
1098 if (atomic_read(&t->tunnel_users) == 2)
1099 xfrm_state_delete(t);
1100 atomic_dec(&t->tunnel_users);
1105 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1108 * This function is NOT optimal. For example, with ESP it will give an
1109 * MTU that's usually two bytes short of being optimal. However, it will
1110 * usually give an answer that's a multiple of 4 provided the input is
1111 * also a multiple of 4.
/*
 * Compute the usable payload MTU through this transform: subtract the
 * transform header, then (for VALID states with a type hook) iterate
 * get_max_size under the state lock to account for padding/trailer
 * (iteration logic partially elided).
 */
1113 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1117 res -= x->props.header_len;
1125 spin_lock_bh(&x->lock);
1126 if (x->km.state == XFRM_STATE_VALID &&
1127 x->type && x->type->get_max_size)
1128 m = x->type->get_max_size(x, m);
1130 m += x->props.header_len;
1131 spin_unlock_bh(&x->lock);
1141 EXPORT_SYMBOL(xfrm_state_mtu);
/*
 * Finish constructing an SA: run the family's init_flags hook, bind
 * the protocol type for (proto, family), run its init_state, and on
 * success mark the state VALID.  Error unwinding paths are elided.
 */
1143 int xfrm_init_state(struct xfrm_state *x)
1145 struct xfrm_state_afinfo *afinfo;
1146 int family = x->props.family;
1149 err = -EAFNOSUPPORT;
1150 afinfo = xfrm_state_get_afinfo(family);
1155 if (afinfo->init_flags)
1156 err = afinfo->init_flags(x);
1158 xfrm_state_put_afinfo(afinfo);
/* Resolve the protocol type module (ah/esp/ipcomp/...). */
1163 err = -EPROTONOSUPPORT;
1164 x->type = xfrm_get_type(x->id.proto, family);
1165 if (x->type == NULL)
1168 err = x->type->init_state(x);
1172 x->km.state = XFRM_STATE_VALID;
1178 EXPORT_SYMBOL(xfrm_init_state);
/*
 * Boot-time init: empty every bydst/byspi hash bucket and set up the
 * GC work item.  (Function tail is truncated in this listing.)
 */
1180 void __init xfrm_state_init(void)
1184 for (i=0; i<XFRM_DST_HSIZE; i++) {
1185 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1186 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1188 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);