/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"
#include "ipath_user_sdma.h"
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);
/*
 * This is really, really weird shit - write() and writev() here
 * have completely unrelated semantics.  Sucky userland ABI,
 * film at 11.
 */
static const struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.write_iter = ipath_write_iter,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap,
	.llseek = noop_llseek,
};
/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
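/*
 * Illustrative sketch (not part of the driver): mmap_kvaddr() below relies
 * on this round trip - a vmalloc'ed kernel buffer is converted to a
 * pseudo-physical mmap offset here, handed to user space, and matched
 * again later by recomputing cvt_kvaddr() on the same kernel pointer.
 * Assuming a hypothetical vmalloc'ed buffer "buf":
 *
 *	void *buf = vmalloc_user(PAGE_SIZE);	// hypothetical buffer
 *	u64 off = cvt_kvaddr(buf);		// offset given to user space
 *	// later, in the mmap path, the same pointer yields the same offset:
 *	// off == cvt_kvaddr(buf)
 *
 * cvt_kvaddr() returns 0 when the pointer has no backing page, which the
 * callers treat as "no match".
 */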
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
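	/*
	 * Worked example (sketch, values hypothetical): spi_status is the
	 * physical pioavail address offset by however far ipath_statusp
	 * sits inside the pioavail DMA page.  If ipath_pioavailregs_phys
	 * were 0x1000 and ipath_statusp pointed 0x80 bytes past
	 * ipath_pioavailregs_dma, user space would get 0x1080 - the status
	 * word lands inside the same page it already mmaps for the pioavail
	 * registers, so no extra mapping is needed.
	 */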
	if (!shared) {
		kinfo->spi_piocnt = pd->port_piocnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
				    (pd->port_piocnt % subport_cnt);
		/* Master's PIO buffers are after all the slaves' */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(pd->port_piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}
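	/*
	 * Example of the split (illustrative numbers): with port_piocnt = 10
	 * and subport_cnt = 3, each slave gets 10 / 3 = 3 buffers and the
	 * master gets 3 + (10 % 3) = 4.  Slaves are laid out first at
	 * port_piobufs, so slave 0 starts at offset 0, slave 1 at
	 * 3 * palign, and the master's 4 buffers begin at
	 * (10 - 4) * palign = 6 * palign.
	 */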
	if (shared) {
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
		if (!master) {
			kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
				PAGE_SIZE * subport_fp(fp));

			kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
				pd->port_rcvhdrq_size * subport_fp(fp));
			kinfo->spi_rcvhdr_tailaddr = 0;
			kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
				pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
				subport_fp(fp));
		}
		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			   kinfo->spi_port, kinfo->spi_runtime_flags,
			   (unsigned long long) kinfo->spi_subport_uregbase,
			   (unsigned long long) kinfo->spi_subport_rcvegrbuf,
			   (unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}
	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
		(dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master)
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
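	/*
	 * Worked example (illustrative numbers): with rcvtidcnt = 64 and
	 * port_subport_cnt = 3, each slave spans 64 / 3 = 21 TIDs, slave 1
	 * at tidoff 0 and slave 2 at tidoff 21, while the master
	 * (subport 0) gets 21 + (64 % 3) = 22 TIDs at tidoff 64 - 22 = 42,
	 * mirroring the PIO buffer split in ipath_get_base_info().
	 */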
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			goto cleanup;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
				    physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					       dd->ipath_physshadow[porttid + tid],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail"  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof(tidmap))) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			struct page *p;

			p = dd->ipath_pageshadow[porttid + tid];
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pid_nr(pd->port_pid), tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid],
					    RCVHQ_RCV_TYPE_EXPECTED,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				       dd->ipath_physshadow[porttid + tid],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(&p, 1);
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));
	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	if (subport)
		goto bail;
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
					   "but ref still %d\n", pd->port_port,
					   pd->port_pkeys[i], j,
					   atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}
/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}
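/*
 * Worked example (illustrative numbers): with a 2048-byte eager buffer
 * size and rcvegrcnt = 100, each 32KB (0x8000) chunk holds
 * 0x8000 / 2048 = 16 buffers, so port_rcvegrbuf_chunks =
 * (100 + 16 - 1) / 16 = 7 chunks, the last one only partially used.
 * The round-up keeps every eager TID backed even when egrcnt is not a
 * multiple of egrperchunk.
 */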
/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
				     GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
			GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase),
					    RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		     pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);
	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}
/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}
static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for when
	 * not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}
/*
 * ipath_file_vma_fault - handle a VMA page fault.
 */
static int ipath_file_vma_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,
};
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret = 0;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt)
		goto bail;

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Each process has all the subport uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
		addr = pd->subport_uregbase;
		size = PAGE_SIZE * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
		addr = pd->subport_rcvhdr_base;
		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
		addr = pd->subport_rcvegrbuf;
		size *= pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
					PAGE_SIZE * subport)) {
		addr = pd->subport_uregbase + PAGE_SIZE * subport;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
					pd->port_rcvhdrq_size * subport)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport;
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
					size * subport)) {
		addr = pd->subport_rcvegrbuf + size * subport;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	ret = 1;

bail:
	return ret;
}
/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
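	/*
	 * Sketch of the userland side (illustrative, not from this file):
	 * the library takes an offset it received in struct
	 * ipath_base_info, e.g. spi_rcvhdr_base, and hands it straight to
	 * mmap(2) on the device fd:
	 *
	 *	// "fd", "binfo", and "hdrq_size" are hypothetical
	 *	// userspace variables
	 *	void *hdrq = mmap(NULL, hdrq_size, PROT_READ, MAP_SHARED,
	 *			  fd, (off_t) binfo.spi_rcvhdr_base);
	 *
	 * The kernel then recovers the offset below as
	 * vm_pgoff << PAGE_SHIFT and matches it against the known physical
	 * (or cvt_kvaddr) addresses.
	 */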
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
	if (!pd->port_subport_cnt) {
		/* port is not shared */
		piocnt = pd->port_piocnt;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master */
		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
			 (pd->port_piocnt % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (pd->port_piocnt - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		piocnt = pd->port_piocnt / pd->port_subport_cnt;
		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}
static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
{
	unsigned pollflag = 0;

	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_hdrqfull_poll = pd->port_hdrqfull;
	}

	return pollflag;
}
static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
				      struct file *fp,
				      struct poll_table_struct *pt)
{
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	if (pd->port_urgent != pd->port_urgent_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_urgent_poll = pd->port_urgent;
	}

	if (!pollflag) {
		/* this saves a spin_lock/unlock in interrupt handler... */
		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
		/* flush waiting flag so don't miss an event... */
		wmb();
		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}
static unsigned int ipath_poll_next(struct ipath_portdata *pd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	u32 head;
	u32 tail;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	if (pd->port_rcvhdrtail_kvaddr)
		tail = ipath_get_rcvhdrtail(pd);
	else
		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (head != tail)
		pollflag |= POLLIN | POLLRDNORM;
	else {
		/* this saves a spin_lock/unlock in interrupt handler */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		/* flush waiting flag so we don't miss an event */
		wmb();

		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
			&dd->ipath_rcvctrl);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);

		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			ipath_write_ureg(dd, ur_rcvhdrhead,
					 dd->ipath_rhdrhead_intr_off | head,
					 pd->port_port);

		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}
static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	unsigned pollflag;

	pd = port_fp(fp);
	if (!pd)
		pollflag = 0;
	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
		pollflag = ipath_poll_urgent(pd, fp, pt);
	else
		pollflag = ipath_poll_next(pd, fp, pt);

	return pollflag;
}
static int ipath_supports_subports(int user_swmajor, int user_swminor)
{
	/* no subport implementation prior to software version 1.3 */
	return (user_swmajor > 1) || (user_swminor >= 3);
}
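/*
 * Version encoding note (sketch): spu_userversion packs the major number
 * in the high 16 bits and the minor in the low 16, so a library built
 * against version 1.3 reports 0x00010003 and
 *
 *	ipath_supports_subports(0x00010003 >> 16, 0x00010003 & 0xffff)
 *
 * evaluates to (1 > 1) || (3 >= 3), i.e. true, while 1.2 (0x00010002)
 * does not.
 */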
static int ipath_compatible_subports(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (IPATH_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (IPATH_USER_SWMAJOR == 1) {
		switch (IPATH_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subport implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor >= 4;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}
static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subports;
	size_t size;

	/*
	 * If the user is requesting zero subports,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 0)
		goto bail;

	/* Self-consistency check for ipath_compatible_subports() */
	if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
				       IPATH_USER_SWMINOR)) {
		dev_info(&dd->pcidev->dev,
			 "Inconsistent ipath_compatible_subports()\n");
		goto bail;
	}

	/* Check for subport compatibility */
	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
				       uinfo->spu_userversion & 0xffff)) {
		dev_info(&dd->pcidev->dev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while port sharing. Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 IPATH_USER_SWMAJOR,
			 IPATH_USER_SWMINOR);
		goto bail;
	}
	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
		ret = -EINVAL;
		goto bail;
	}

	num_subports = uinfo->spu_subport_cnt;
	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subports;
	pd->subport_rcvhdr_base = vzalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_subports);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
	pd->subport_rcvhdr_base = NULL;
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}
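/*
 * Sizing example for the shared rcvhdr area above (illustrative numbers):
 * with rcvhdrcnt = 512 entries of rcvhdrentsize = 16 u32 words each, one
 * queue needs 512 * 16 * 4 = 32768 bytes; ALIGN(..., PAGE_SIZE) keeps it
 * at 32KB on 4KB pages, and 4 subports make the vzalloc 128KB total, one
 * page-aligned queue slice per subport.
 */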
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}
static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}
static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
static int find_best_unit(struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	devmax = ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * InfiniPath chip to that processor (we assume reasonable connectivity,
	 * for now).  This code assumes that if affinity has been set
	 * before this point, that at most one cpu is set; for now this
	 * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT chips connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpumask_empty(tsk_cpus_allowed(current)) &&
	    !cpumask_full(tsk_cpus_allowed(current))) {
		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;

		for_each_online_cpu(i)
			if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d/%d\n", current->comm,
					   current->pid, i, ncpus);
				curcpu = i;
				nset++;
			}
		if (curcpu != -1 && nset != ncpus) {
			if (npresent) {
				prefunit = curcpu / (ncpus / npresent);
				ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
					   "%d cpus/chip, select unit %d\n",
					   current->comm, current->pid,
					   npresent, ncpus, ncpus / npresent,
					   prefunit);
			}
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue; /* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit. Try
				 * next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp, uinfo);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}
static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!usable(dd))
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			pd->port_subpid[subport_fp(fp)] =
				get_pid(task_pid(current));
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pid_nr(pd->port_pid),
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}
/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	if (ipath_compatible_subports(swmajor, swminor) &&
	    uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		if (ret > 0)
			ret = 0;
		goto done_chk_sdma;
	}

	i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)file_inode(fp)->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

done_chk_sdma:
	if (!ret) {
		struct ipath_filedata *fd = fp->private_data;
		const struct ipath_portdata *pd = fd->pd;
		const struct ipath_devdata *dd = pd->port_dd;

		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
						      dd->ipath_unit,
						      pd->port_port,
						      fd->subport);
		if (!fd->pq)
			ret = -ENOMEM;
	}

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}
static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd = port_fp(fp);
	struct ipath_devdata *dd;
	u32 head32;

	/* Subports don't need to initialize anything since master did it. */
	if (subport_fp(fp)) {
		ret = wait_event_interruptible(pd->port_wait,
			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
		goto done;
	}

	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* some ports may get extra buffers, calculate that here */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_piocnt = dd->ipath_pbufsport + 1;
	else
		pd->port_piocnt = dd->ipath_pbufsport;

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_pio_base = (dd->ipath_pbufsport + 1)
			* (pd->port_port - 1);
	else
		pd->port_pio_base = dd->ipath_ports_extrabuf +
			dd->ipath_pbufsport * (pd->port_port - 1);
	pd->port_piobufs = dd->ipath_piobufbase +
		pd->port_pio_base * dd->ipath_palign;
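	/*
	 * Layout example (illustrative numbers): with pbufsport = 32,
	 * palign = 2048 and ports_extrabuf = 2, ports 1 and 2 get 33
	 * buffers each at pio bases 0 and 33, while port 3 gets 32 buffers
	 * at base 2 + 32 * 2 = 66; port_piobufs is then just piobufbase +
	 * base * 2048, so each port's PIO window is contiguous and
	 * non-overlapping.
	 */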
	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
		   " first pio %u\n", pd->port_port, pd->port_piobufs,
		   pd->port_piocnt, pd->port_pio_base);
	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle by handling overflow
	 * through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	pd->port_lastrcvhdrqtail = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */

	/* initialize poll variables... */
	pd->port_urgent = 0;
	pd->port_urgent_poll = 0;
	pd->port_hdrqfull_poll = pd->port_hdrqfull;

	/*
	 * Now enable the port for receive.
	 * Some chips DMA the tail register to memory when it changes (and
	 * when the update bit transitions from 0 to 1), so for those chips
	 * we turn it off and then back on.  This will (very briefly)
	 * affect any other open ports, but the duration is very short,
	 * and therefore isn't an issue.  We explicitly set the in-memory
	 * tail copy to 0 beforehand, so we don't have to wait to be sure
	 * the DMA update has happened (chip resets head/tail to 0 on
	 * transition to enable).
	 */
	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
		&dd->ipath_rcvctrl);
	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl &
				 ~(1ULL << dd->ipath_r_tailupd_shift));
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* Notify any waiting slaves */
	if (pd->port_subport_cnt) {
		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
		wake_up(&pd->port_wait);
	}
done:
	return ret;
}
/*
 * unlock_expected_tids - unlock any expected TID entries port still had
 * in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		struct page *ps = dd->ipath_pageshadow[i];

		if (!ps)
			continue;

		dd->ipath_pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&ps, 1);
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);
	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}
static int ipath_close(struct inode *in, struct file *fp)
{
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned long flags;
	unsigned port;
	struct pid *pid;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}

	dd = pd->port_dd;

	/* drain user sdma queue */
	ipath_user_sdma_queue_drain(dd, fd->pq);
	ipath_user_sdma_queue_destroy(fd->pq);

	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		put_pid(pd->port_subpid[fd->subport]);
		pd->port_subpid[fd->subport] = NULL;
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	port = pd->port_port;
	dd->ipath_pd[port] = NULL;
	pid = pd->port_pid;
	pd->port_pid = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
			   pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		/* atomically clear receive enable port and intr avail. */
		clear_bit(dd->ipath_r_portenable_shift + port,
			  &dd->ipath_rcvctrl);
		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
					 pd->port_piocnt, 1);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pid_nr(pid),
			   dd->ipath_unit, port);
	}

	put_pid(pid);
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return 0;
}
static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if (ipath_supports_subports(pd->userversion >> 16,
				    pd->userversion & 0xffff)) {
		/* Number of user ports available for this device. */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}
static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
		ret = -EFAULT;
	return ret;
}
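/*
 * The two helpers below expose the user sdma queue counters to
 * userspace: "inflight" reflects requests that have been queued to
 * the hardware, "complete" reflects requests known to have finished;
 * comparing the two tells the library when its sends have drained.
 */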
static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
				   u32 __user *inflightp)
{
	const u32 val = ipath_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}
static int ipath_sdma_get_complete(struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   u32 __user *completep)
{
	u32 val;
	int err;

	err = ipath_user_sdma_make_progress(dd, pq);
	if (err < 0)
		return err;

	val = ipath_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}
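/*
 * Control path for the device: userland writes a struct ipath_cmd
 * whose leading type word selects which member of the payload union
 * is copied in; on success the return value is the number of bytes
 * consumed (the type word plus the payload for that type, if any).
 */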
static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
		return -EACCES;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case __IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	case IPATH_CMD_PIOAVAILUPD:	/* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;
	case IPATH_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}
	if ((count - consumed) < copy) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(dest, src, copy)) {
		ret = -EFAULT;
		goto bail;
	}

	consumed += copy;

	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case __IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	case IPATH_CMD_PIOAVAILUPD:
		ipath_force_pio_avail_update(pd->port_dd);
		break;
	case IPATH_CMD_POLL_TYPE:
		pd->poll_type = cmd.cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		if (cmd.cmd.armlaunch_ctrl)
			ipath_enable_armlaunch(pd->port_dd);
		else
			ipath_disable_armlaunch(pd->port_dd);
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_inflight);
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		ret = ipath_sdma_get_complete(pd->port_dd,
					      user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_complete);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
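/*
 * Data path for the device: unlike write(), no ipath_cmd header is
 * expected here; each iovec in the writev()/aio request describes a
 * user sdma transfer for this port's sdma queue.
 */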
static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct ipath_filedata *fp = filp->private_data;
	struct ipath_portdata *pd = port_fp(filp);
	struct ipath_user_sdma_queue *pq = fp->pq;

	if (!iter_is_iovec(from) || !from->nr_segs)
		return -EINVAL;

	return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
}
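/*
 * A minimal (illustrative, not normative) userspace sketch of the two
 * write paths above, assuming a hypothetical descriptor "fd" opened on
 * the device node; "pkt", "pkt_len" and "completed" are example names:
 *
 *	struct iovec iov = { .iov_base = pkt, .iov_len = pkt_len };
 *	(void) writev(fd, &iov, 1);		// queue one sdma request
 *
 *	__u32 completed;
 *	struct ipath_cmd cmd = { .type = IPATH_CMD_SDMA_COMPLETE };
 *	cmd.cmd.sdma_complete = (__u64) (unsigned long) &completed;
 *	(void) write(fd, &cmd, sizeof(cmd));	// refresh the counter
 */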
static struct class *ipath_class;
static int init_cdev(int minor, char *name, const struct file_operations *fops,
		     struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*devp = device;
	} else {
		*cdevp = NULL;
		*devp = NULL;
	}

	return ret;
}
int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
		    struct cdev **cdevp, struct device **devp)
{
	return init_cdev(minor, name, fops, cdevp, devp);
}
static void cleanup_cdev(struct cdev **cdevp,
			 struct device **devp)
{
	struct device *dev = *devp;

	if (dev) {
		device_unregister(dev);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}
void ipath_cdev_cleanup(struct cdev **cdevp,
			struct device **devp)
{
	cleanup_cdev(cdevp, devp);
}
static struct cdev *wildcard_cdev;
static struct device *wildcard_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
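/*
 * Minor number layout: minor 0 is the wildcard device ("ipath"), used
 * by applications that don't care which unit they get; unit N claims
 * minor N + 1 as its own device node ("ipathN").
 */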
static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail;
	}

	goto done;

bail:
	unregister_chrdev_region(dev, IPATH_NMINORS);

done:
	return ret;
}
static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}
static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);
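/*
 * user_count tracks how many units have registered user support; the
 * first ipath_user_add() creates the class, chrdev region and wildcard
 * device, and user_setup records that this succeeded so the last
 * ipath_user_remove() knows whether there is anything to tear down.
 */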
int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}
void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}

bail:
	return;
}