/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
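	/*
	 * Illustrative arithmetic (not from the code below): with
	 * expected_count = 100 and subctxt_cnt = 3, every subcontext
	 * gets 100 / 3 = 33 entries, and subcontext 0 picks up the
	 * remainder for a limit of 34.
	 */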
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler) {
		hfi1_mmu_rb_unregister(fd->handler);
	} else {
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	}

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/**
 * unpin_rcv_pages - release pinned receive buffer pages
 * @fd: per-file private data
 * @tidbuf: unmapped buffer information (used when @mapped is false)
 * @node: TID node holding the DMA mapping (used when @mapped is true)
 * @idx: index of the first page to unpin
 * @npages: number of pages to unpin
 * @mapped: true if the pages have been DMA mapped, false otherwise
 *
 * If the pages have been DMA mapped (indicated by @mapped), their info will
 * be passed via @node, a struct tid_rb_node. If they haven't been mapped,
 * their info will be passed via @tidbuf, a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->mmu.len, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
	} else {
		pages = &tidbuf->pages[idx];
	}
	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

/**
 * pin_rcv_pages - pin the pages of a user receive buffer
 * @fd: per-file private data
 * @tidbuf: buffer to pin; on success, its pages and npages fields are
 *          filled in
 *
 * Return: the number of pages pinned (which may be fewer than requested),
 * 0 if nothing could be pinned, or a negative errno on failure.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

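	/*
	 * In the worst case of no physical contiguity, each pinned page
	 * consumes one RcvArray entry, so a buffer spanning more pages
	 * than the context's expected_count can never be fully programmed.
	 */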
	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}
	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
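/*
 * Illustrative walkthrough (hypothetical numbers): with a group size of
 * 8, a request that produced 19 pagesets first consumes two complete
 * groups from tid_group_list (16 pagesets; both groups move to
 * tid_full_list). The remaining 3 pagesets land in a group from
 * tid_used_list, or in a fresh group pulled from tid_group_list into
 * tid_used_list if no partially used group has room.
 */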
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_lock);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				ret = -EFAULT;
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_lock);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user((void __user *)(unsigned long)tinfo->tidlist,
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_lock);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_lock);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
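	/*
	 * Per-process event flags word: each dynamically allocated context
	 * owns HFI1_MAX_SHARED_CTXTS consecutive slots in dd->events, one
	 * per subcontext (the same computation appears in
	 * tid_rb_invalidate()).
	 */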
	unsigned long *ev = uctxt->dd->events +
		(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
	u32 *array;
	int ret = 0;

	if (!fd->invalid_tids)
		return -EINVAL;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the PFNs are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two. If
			 *        it is not, round down to the closest power
			 *        of two and program that size.
			 */
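			/*
			 * Example with made-up numbers: a run of 13
			 * contiguous 4 KiB pages is emitted as pagesets of
			 * 8, 4, and 1 pages (each a power of two), assuming
			 * MAX_EXPECTED_BUFFER is at least 32 KiB.
			 */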
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *        virtual address, buffer length, page pointers, pagesets (array of
 *        struct tid_pageset holding information on physically contiguous
 *        chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}
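	/*
	 * Worked example (hypothetical state): with a group size of 8,
	 * entries 2 and 5 already in use, and count = 3, the loop below
	 * programs entries 0, 1, and 3, while blank write-combining fills
	 * go to entries 2, 4, 5, 6, and 7.
	 */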

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;
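		/*
		 * Encode the entry's offset from expected_base and its page
		 * count into the tidinfo word handed back to user space;
		 * unprogram_rcvarray() decodes the same layout.
		 */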
		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
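	/*
	 * Program the HW entry. npages is a power of two by construction
	 * (see find_phys_blocks()), so its log2 cleanly encodes the buffer
	 * size for hfi1_put_tid().
	 */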
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

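	/*
	 * IDX addresses a pair of RcvArray entries; CTRL selects the entry
	 * within the pair (1 or 2). A CTRL value of 3 would refer to both
	 * entries of the pair and cannot name a single entry to release.
	 */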
	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

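	/*
	 * used has just been decremented: if the group was full, it now has
	 * exactly one free entry and moves back to the partially used list;
	 * if it is now completely empty, it returns to the free list.
	 */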
	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
							  uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

/*
 * Always return 0 from this function.  A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM.  Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away.  PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
			  (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}

static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}