448129f74cf9060b4268ee6ebf0c147bc3ea824c
[firefly-linux-kernel-4.4.55.git] / drivers / target / target_core_tpg.c
1 /*******************************************************************************
2  * Filename:  target_core_tpg.c
3  *
4  * This file contains generic Target Portal Group related functions.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28
29 #include <linux/net.h>
30 #include <linux/string.h>
31 #include <linux/timer.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34 #include <linux/in.h>
35 #include <net/sock.h>
36 #include <net/tcp.h>
37 #include <scsi/scsi.h>
38 #include <scsi/scsi_cmnd.h>
39
40 #include <target/target_core_base.h>
41 #include <target/target_core_device.h>
42 #include <target/target_core_tpg.h>
43 #include <target/target_core_transport.h>
44 #include <target/target_core_fabric_ops.h>
45
46 #include "target_core_hba.h"
47 #include "target_core_stat.h"
48
49 extern struct se_device *g_lun0_dev;
50
51 static DEFINE_SPINLOCK(tpg_lock);
52 static LIST_HEAD(tpg_list);
53
54 /*      core_clear_initiator_node_from_tpg():
55  *
56  *
57  */
58 static void core_clear_initiator_node_from_tpg(
59         struct se_node_acl *nacl,
60         struct se_portal_group *tpg)
61 {
62         int i;
63         struct se_dev_entry *deve;
64         struct se_lun *lun;
65         struct se_lun_acl *acl, *acl_tmp;
66
67         spin_lock_irq(&nacl->device_list_lock);
68         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
69                 deve = &nacl->device_list[i];
70
71                 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
72                         continue;
73
74                 if (!deve->se_lun) {
75                         printk(KERN_ERR "%s device entries device pointer is"
76                                 " NULL, but Initiator has access.\n",
77                                 tpg->se_tpg_tfo->get_fabric_name());
78                         continue;
79                 }
80
81                 lun = deve->se_lun;
82                 spin_unlock_irq(&nacl->device_list_lock);
83                 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
84                         TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
85
86                 spin_lock(&lun->lun_acl_lock);
87                 list_for_each_entry_safe(acl, acl_tmp,
88                                         &lun->lun_acl_list, lacl_list) {
89                         if (!(strcmp(acl->initiatorname,
90                                         nacl->initiatorname)) &&
91                              (acl->mapped_lun == deve->mapped_lun))
92                                 break;
93                 }
94
95                 if (!acl) {
96                         printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
97                                 " mapped_lun: %u\n", nacl->initiatorname,
98                                 deve->mapped_lun);
99                         spin_unlock(&lun->lun_acl_lock);
100                         spin_lock_irq(&nacl->device_list_lock);
101                         continue;
102                 }
103
104                 list_del(&acl->lacl_list);
105                 spin_unlock(&lun->lun_acl_lock);
106
107                 spin_lock_irq(&nacl->device_list_lock);
108                 kfree(acl);
109         }
110         spin_unlock_irq(&nacl->device_list_lock);
111 }
112
113 /*      __core_tpg_get_initiator_node_acl():
114  *
115  *      spin_lock_bh(&tpg->acl_node_lock); must be held when calling
116  */
117 struct se_node_acl *__core_tpg_get_initiator_node_acl(
118         struct se_portal_group *tpg,
119         const char *initiatorname)
120 {
121         struct se_node_acl *acl;
122
123         list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
124                 if (!(strcmp(acl->initiatorname, initiatorname)))
125                         return acl;
126         }
127
128         return NULL;
129 }
130
131 /*      core_tpg_get_initiator_node_acl():
132  *
133  *
134  */
135 struct se_node_acl *core_tpg_get_initiator_node_acl(
136         struct se_portal_group *tpg,
137         unsigned char *initiatorname)
138 {
139         struct se_node_acl *acl;
140
141         spin_lock_bh(&tpg->acl_node_lock);
142         list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
143                 if (!(strcmp(acl->initiatorname, initiatorname)) &&
144                    (!(acl->dynamic_node_acl))) {
145                         spin_unlock_bh(&tpg->acl_node_lock);
146                         return acl;
147                 }
148         }
149         spin_unlock_bh(&tpg->acl_node_lock);
150
151         return NULL;
152 }
153
/*	core_tpg_add_node_to_devs():
 *
 *	Map every currently active LUN on 'tpg' into the device list of
 *	node ACL 'acl', choosing READ-ONLY or READ-WRITE access based on
 *	the fabric's demo-mode write-protect setting.  Called from the
 *	demo-mode dynamic ACL path in core_tpg_check_initiator_node_acl().
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		/*
		 * Dropped around core_update_device_list_for_node() below.
		 * NOTE(review): lun/dev are then used unlocked — presumably
		 * LUN removal is serialized against demo-mode login; confirm.
		 */
		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) {
			/* Write protect off: honor the device's own RO flag. */
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		/* Create the mapped-LUN entry for this node ACL. */
		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
209
210 /*      core_set_queue_depth_for_node():
211  *
212  *
213  */
214 static int core_set_queue_depth_for_node(
215         struct se_portal_group *tpg,
216         struct se_node_acl *acl)
217 {
218         if (!acl->queue_depth) {
219                 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
220                         "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
221                         acl->initiatorname);
222                 acl->queue_depth = 1;
223         }
224
225         return 0;
226 }
227
228 /*      core_create_device_list_for_node():
229  *
230  *
231  */
232 static int core_create_device_list_for_node(struct se_node_acl *nacl)
233 {
234         struct se_dev_entry *deve;
235         int i;
236
237         nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
238                                 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
239         if (!(nacl->device_list)) {
240                 printk(KERN_ERR "Unable to allocate memory for"
241                         " struct se_node_acl->device_list\n");
242                 return -ENOMEM;
243         }
244         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
245                 deve = &nacl->device_list[i];
246
247                 atomic_set(&deve->ua_count, 0);
248                 atomic_set(&deve->pr_ref_count, 0);
249                 spin_lock_init(&deve->ua_lock);
250                 INIT_LIST_HEAD(&deve->alua_port_list);
251                 INIT_LIST_HEAD(&deve->ua_list);
252         }
253
254         return 0;
255 }
256
257 /*      core_tpg_check_initiator_node_acl()
258  *
259  *
260  */
261 struct se_node_acl *core_tpg_check_initiator_node_acl(
262         struct se_portal_group *tpg,
263         unsigned char *initiatorname)
264 {
265         struct se_node_acl *acl;
266
267         acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
268         if ((acl))
269                 return acl;
270
271         if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)))
272                 return NULL;
273
274         acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
275         if (!(acl))
276                 return NULL;
277
278         INIT_LIST_HEAD(&acl->acl_list);
279         INIT_LIST_HEAD(&acl->acl_sess_list);
280         spin_lock_init(&acl->device_list_lock);
281         spin_lock_init(&acl->nacl_sess_lock);
282         atomic_set(&acl->acl_pr_ref_count, 0);
283         acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
284         snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
285         acl->se_tpg = tpg;
286         acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
287         spin_lock_init(&acl->stats_lock);
288         acl->dynamic_node_acl = 1;
289
290         tpg->se_tpg_tfo->set_default_node_attributes(acl);
291
292         if (core_create_device_list_for_node(acl) < 0) {
293                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
294                 return NULL;
295         }
296
297         if (core_set_queue_depth_for_node(tpg, acl) < 0) {
298                 core_free_device_list_for_node(acl, tpg);
299                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
300                 return NULL;
301         }
302
303         core_tpg_add_node_to_devs(acl, tpg);
304
305         spin_lock_bh(&tpg->acl_node_lock);
306         list_add_tail(&acl->acl_list, &tpg->acl_node_list);
307         tpg->num_node_acls++;
308         spin_unlock_bh(&tpg->acl_node_lock);
309
310         printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
311                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
312                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
313                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
314
315         return acl;
316 }
317 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
318
319 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
320 {
321         while (atomic_read(&nacl->acl_pr_ref_count) != 0)
322                 cpu_relax();
323 }
324
325 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
326 {
327         int i, ret;
328         struct se_lun *lun;
329
330         spin_lock(&tpg->tpg_lun_lock);
331         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
332                 lun = &tpg->tpg_lun_list[i];
333
334                 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
335                     (lun->lun_se_dev == NULL))
336                         continue;
337
338                 spin_unlock(&tpg->tpg_lun_lock);
339                 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
340                 spin_lock(&tpg->tpg_lun_lock);
341         }
342         spin_unlock(&tpg->tpg_lun_lock);
343 }
344 EXPORT_SYMBOL(core_tpg_clear_object_luns);
345
346 /*      core_tpg_add_initiator_node_acl():
347  *
348  *
349  */
350 struct se_node_acl *core_tpg_add_initiator_node_acl(
351         struct se_portal_group *tpg,
352         struct se_node_acl *se_nacl,
353         const char *initiatorname,
354         u32 queue_depth)
355 {
356         struct se_node_acl *acl = NULL;
357
358         spin_lock_bh(&tpg->acl_node_lock);
359         acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
360         if ((acl)) {
361                 if (acl->dynamic_node_acl) {
362                         acl->dynamic_node_acl = 0;
363                         printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
364                                 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
365                                 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
366                         spin_unlock_bh(&tpg->acl_node_lock);
367                         /*
368                          * Release the locally allocated struct se_node_acl
369                          * because * core_tpg_add_initiator_node_acl() returned
370                          * a pointer to an existing demo mode node ACL.
371                          */
372                         if (se_nacl)
373                                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
374                                                         se_nacl);
375                         goto done;
376                 }
377
378                 printk(KERN_ERR "ACL entry for %s Initiator"
379                         " Node %s already exists for TPG %u, ignoring"
380                         " request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
381                         initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
382                 spin_unlock_bh(&tpg->acl_node_lock);
383                 return ERR_PTR(-EEXIST);
384         }
385         spin_unlock_bh(&tpg->acl_node_lock);
386
387         if (!(se_nacl)) {
388                 printk("struct se_node_acl pointer is NULL\n");
389                 return ERR_PTR(-EINVAL);
390         }
391         /*
392          * For v4.x logic the se_node_acl_s is hanging off a fabric
393          * dependent structure allocated via
394          * struct target_core_fabric_ops->fabric_make_nodeacl()
395          */
396         acl = se_nacl;
397
398         INIT_LIST_HEAD(&acl->acl_list);
399         INIT_LIST_HEAD(&acl->acl_sess_list);
400         spin_lock_init(&acl->device_list_lock);
401         spin_lock_init(&acl->nacl_sess_lock);
402         atomic_set(&acl->acl_pr_ref_count, 0);
403         acl->queue_depth = queue_depth;
404         snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
405         acl->se_tpg = tpg;
406         acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
407         spin_lock_init(&acl->stats_lock);
408
409         tpg->se_tpg_tfo->set_default_node_attributes(acl);
410
411         if (core_create_device_list_for_node(acl) < 0) {
412                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
413                 return ERR_PTR(-ENOMEM);
414         }
415
416         if (core_set_queue_depth_for_node(tpg, acl) < 0) {
417                 core_free_device_list_for_node(acl, tpg);
418                 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
419                 return ERR_PTR(-EINVAL);
420         }
421
422         spin_lock_bh(&tpg->acl_node_lock);
423         list_add_tail(&acl->acl_list, &tpg->acl_node_list);
424         tpg->num_node_acls++;
425         spin_unlock_bh(&tpg->acl_node_lock);
426
427 done:
428         printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
429                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
430                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
431                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
432
433         return acl;
434 }
435 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
436
437 /*      core_tpg_del_initiator_node_acl():
438  *
439  *
440  */
441 int core_tpg_del_initiator_node_acl(
442         struct se_portal_group *tpg,
443         struct se_node_acl *acl,
444         int force)
445 {
446         struct se_session *sess, *sess_tmp;
447         int dynamic_acl = 0;
448
449         spin_lock_bh(&tpg->acl_node_lock);
450         if (acl->dynamic_node_acl) {
451                 acl->dynamic_node_acl = 0;
452                 dynamic_acl = 1;
453         }
454         list_del(&acl->acl_list);
455         tpg->num_node_acls--;
456         spin_unlock_bh(&tpg->acl_node_lock);
457
458         spin_lock_bh(&tpg->session_lock);
459         list_for_each_entry_safe(sess, sess_tmp,
460                                 &tpg->tpg_sess_list, sess_list) {
461                 if (sess->se_node_acl != acl)
462                         continue;
463                 /*
464                  * Determine if the session needs to be closed by our context.
465                  */
466                 if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
467                         continue;
468
469                 spin_unlock_bh(&tpg->session_lock);
470                 /*
471                  * If the $FABRIC_MOD session for the Initiator Node ACL exists,
472                  * forcefully shutdown the $FABRIC_MOD session/nexus.
473                  */
474                 tpg->se_tpg_tfo->close_session(sess);
475
476                 spin_lock_bh(&tpg->session_lock);
477         }
478         spin_unlock_bh(&tpg->session_lock);
479
480         core_tpg_wait_for_nacl_pr_ref(acl);
481         core_clear_initiator_node_from_tpg(acl, tpg);
482         core_free_device_list_for_node(acl, tpg);
483
484         printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
485                 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
486                 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
487                 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
488
489         return 0;
490 }
491 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
492
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the TCQ depth of an existing initiator node ACL.  If a live
 *	session exists for the node, 'force' must be non-zero; the session
 *	is then shut down so the fabric reinstates it with the new depth.
 *	Returns 0 on success, or -ENODEV / -EEXIST / -EINVAL on failure.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	/* Set when we temporarily demote a dynamic ACL; restored on exit. */
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(acl)) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exists for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	/*
	 * Clear the dynamic flag while we operate; every exit path below
	 * restores it via 'dynamic_acl'.
	 */
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		/* A live session blocks the change unless force=1. */
		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
			continue;

		/* Remember the session; it is closed after unlock below. */
		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for a Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call  tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already the set session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
602
603 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
604 {
605         /* Set in core_dev_setup_virtual_lun0() */
606         struct se_device *dev = g_lun0_dev;
607         struct se_lun *lun = &se_tpg->tpg_virt_lun0;
608         u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
609         int ret;
610
611         lun->unpacked_lun = 0;
612         lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
613         atomic_set(&lun->lun_acl_count, 0);
614         init_completion(&lun->lun_shutdown_comp);
615         INIT_LIST_HEAD(&lun->lun_acl_list);
616         INIT_LIST_HEAD(&lun->lun_cmd_list);
617         spin_lock_init(&lun->lun_acl_lock);
618         spin_lock_init(&lun->lun_cmd_lock);
619         spin_lock_init(&lun->lun_sep_lock);
620
621         ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
622         if (ret < 0)
623                 return ret;
624
625         return 0;
626 }
627
628 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
629 {
630         struct se_lun *lun = &se_tpg->tpg_virt_lun0;
631
632         core_tpg_post_dellun(se_tpg, lun);
633 }
634
635 int core_tpg_register(
636         struct target_core_fabric_ops *tfo,
637         struct se_wwn *se_wwn,
638         struct se_portal_group *se_tpg,
639         void *tpg_fabric_ptr,
640         int se_tpg_type)
641 {
642         struct se_lun *lun;
643         u32 i;
644
645         se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
646                                 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
647         if (!(se_tpg->tpg_lun_list)) {
648                 printk(KERN_ERR "Unable to allocate struct se_portal_group->"
649                                 "tpg_lun_list\n");
650                 return -ENOMEM;
651         }
652
653         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
654                 lun = &se_tpg->tpg_lun_list[i];
655                 lun->unpacked_lun = i;
656                 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
657                 atomic_set(&lun->lun_acl_count, 0);
658                 init_completion(&lun->lun_shutdown_comp);
659                 INIT_LIST_HEAD(&lun->lun_acl_list);
660                 INIT_LIST_HEAD(&lun->lun_cmd_list);
661                 spin_lock_init(&lun->lun_acl_lock);
662                 spin_lock_init(&lun->lun_cmd_lock);
663                 spin_lock_init(&lun->lun_sep_lock);
664         }
665
666         se_tpg->se_tpg_type = se_tpg_type;
667         se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
668         se_tpg->se_tpg_tfo = tfo;
669         se_tpg->se_tpg_wwn = se_wwn;
670         atomic_set(&se_tpg->tpg_pr_ref_count, 0);
671         INIT_LIST_HEAD(&se_tpg->acl_node_list);
672         INIT_LIST_HEAD(&se_tpg->se_tpg_node);
673         INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
674         spin_lock_init(&se_tpg->acl_node_lock);
675         spin_lock_init(&se_tpg->session_lock);
676         spin_lock_init(&se_tpg->tpg_lun_lock);
677
678         if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
679                 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
680                         kfree(se_tpg);
681                         return -ENOMEM;
682                 }
683         }
684
685         spin_lock_bh(&tpg_lock);
686         list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
687         spin_unlock_bh(&tpg_lock);
688
689         printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
690                 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
691                 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
692                 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
693                 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
694
695         return 0;
696 }
697 EXPORT_SYMBOL(core_tpg_register);
698
699 int core_tpg_deregister(struct se_portal_group *se_tpg)
700 {
701         struct se_node_acl *nacl, *nacl_tmp;
702
703         printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
704                 " for endpoint: %s Portal Tag %u\n",
705                 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
706                 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
707                 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
708                 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
709
710         spin_lock_bh(&tpg_lock);
711         list_del(&se_tpg->se_tpg_node);
712         spin_unlock_bh(&tpg_lock);
713
714         while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
715                 cpu_relax();
716         /*
717          * Release any remaining demo-mode generated se_node_acl that have
718          * not been released because of TFO->tpg_check_demo_mode_cache() == 1
719          * in transport_deregister_session().
720          */
721         spin_lock_bh(&se_tpg->acl_node_lock);
722         list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
723                         acl_list) {
724                 list_del(&nacl->acl_list);
725                 se_tpg->num_node_acls--;
726                 spin_unlock_bh(&se_tpg->acl_node_lock);
727
728                 core_tpg_wait_for_nacl_pr_ref(nacl);
729                 core_free_device_list_for_node(nacl, se_tpg);
730                 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
731
732                 spin_lock_bh(&se_tpg->acl_node_lock);
733         }
734         spin_unlock_bh(&se_tpg->acl_node_lock);
735
736         if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
737                 core_tpg_release_virtual_lun0(se_tpg);
738
739         se_tpg->se_tpg_fabric_ptr = NULL;
740         kfree(se_tpg->tpg_lun_list);
741         return 0;
742 }
743 EXPORT_SYMBOL(core_tpg_deregister);
744
745 struct se_lun *core_tpg_pre_addlun(
746         struct se_portal_group *tpg,
747         u32 unpacked_lun)
748 {
749         struct se_lun *lun;
750
751         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
752                 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
753                         "-1: %u for Target Portal Group: %u\n",
754                         tpg->se_tpg_tfo->get_fabric_name(),
755                         unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
756                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
757                 return ERR_PTR(-EOVERFLOW);
758         }
759
760         spin_lock(&tpg->tpg_lun_lock);
761         lun = &tpg->tpg_lun_list[unpacked_lun];
762         if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
763                 printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
764                         " on %s Target Portal Group: %u, ignoring request.\n",
765                         unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
766                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
767                 spin_unlock(&tpg->tpg_lun_lock);
768                 return ERR_PTR(-EINVAL);
769         }
770         spin_unlock(&tpg->tpg_lun_lock);
771
772         return lun;
773 }
774
775 int core_tpg_post_addlun(
776         struct se_portal_group *tpg,
777         struct se_lun *lun,
778         u32 lun_access,
779         void *lun_ptr)
780 {
781         int ret;
782
783         ret = core_dev_export(lun_ptr, tpg, lun);
784         if (ret < 0)
785                 return ret;
786
787         spin_lock(&tpg->tpg_lun_lock);
788         lun->lun_access = lun_access;
789         lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
790         spin_unlock(&tpg->tpg_lun_lock);
791
792         return 0;
793 }
794
/*
 * Quiesce a LUN before unexport: first revoke all initiator mappings
 * for the LUN on this TPG, then clear it from any active sessions.
 * NOTE(review): order appears deliberate — presumably no new references
 * may be created while in-flight commands drain; confirm before reordering.
 */
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
802
803 struct se_lun *core_tpg_pre_dellun(
804         struct se_portal_group *tpg,
805         u32 unpacked_lun,
806         int *ret)
807 {
808         struct se_lun *lun;
809
810         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
811                 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
812                         "-1: %u for Target Portal Group: %u\n",
813                         tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
814                         TRANSPORT_MAX_LUNS_PER_TPG-1,
815                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
816                 return ERR_PTR(-EOVERFLOW);
817         }
818
819         spin_lock(&tpg->tpg_lun_lock);
820         lun = &tpg->tpg_lun_list[unpacked_lun];
821         if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
822                 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
823                         " Target Portal Group: %u, ignoring request.\n",
824                         tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
825                         tpg->se_tpg_tfo->tpg_get_tag(tpg));
826                 spin_unlock(&tpg->tpg_lun_lock);
827                 return ERR_PTR(-ENODEV);
828         }
829         spin_unlock(&tpg->tpg_lun_lock);
830
831         return lun;
832 }
833
834 int core_tpg_post_dellun(
835         struct se_portal_group *tpg,
836         struct se_lun *lun)
837 {
838         core_tpg_shutdown_lun(tpg, lun);
839
840         core_dev_unexport(lun->lun_se_dev, tpg, lun);
841
842         spin_lock(&tpg->tpg_lun_lock);
843         lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
844         spin_unlock(&tpg->tpg_lun_lock);
845
846         return 0;
847 }