4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 #include <linux/sched.h>
40 #include <linux/highmem.h>
41 #include <linux/pagemap.h>
43 #define DEBUG_SUBSYSTEM S_LLITE
45 #include "../include/obd_support.h"
46 #include "../include/lustre_lite.h"
47 #include "../include/lustre_dlm.h"
48 #include "llite_internal.h"
50 #define SA_OMITTED_ENTRY_MAX 8ULL
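/*
 * Roughly, statahead works as follows: when a process scans a directory in a
 * stat-heavy pattern (e.g. "ls -l"), the client starts a per-directory
 * statahead thread that walks the directory pages and issues asynchronous
 * getattr RPCs ahead of the scanning process, caching the results in
 * ll_sa_entry items. The companion AGL thread asynchronously glimpses file
 * sizes from the OSTs for regular files found by statahead. Entries that fall
 * more than SA_OMITTED_ENTRY_MAX positions behind the statahead window are
 * considered stale and dropped.
 */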
53 /** negative values are for error cases */
54 SA_ENTRY_INIT = 0, /** init entry */
55 SA_ENTRY_SUCC = 1, /** stat succeeded */
56 SA_ENTRY_INVA = 2, /** invalid entry */
57 SA_ENTRY_DEST = 3, /** entry to be destroyed */
61 /* link into sai->sai_entries */
62 struct list_head se_link;
63 /* link into sai->sai_entries_{received,stated} */
64 struct list_head se_list;
65 /* link into sai hash table locally */
66 struct list_head se_hash;
67 /* entry reference count */
69 /* entry index in the sai */
71 /* low layer ldlm lock handle */
75 /* entry size, contains name */
77 /* pointer to async getattr enqueue info */
78 struct md_enqueue_info *se_minfo;
79 /* pointer to the async getattr request */
80 struct ptlrpc_request *se_req;
81 /* pointer to the target inode */
82 struct inode *se_inode;
87 static unsigned int sai_generation = 0;
88 static DEFINE_SPINLOCK(sai_generation_lock);
90 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
92 return list_empty(&entry->se_hash);
96 * The entry can only be released by the caller; it is necessary to hold the lock.
98 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
101 return (entry->se_stat != SA_ENTRY_INIT);
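/*
 * The statahead cache is a small hash table of LL_SA_CACHE_SIZE list heads;
 * an entry's bucket is chosen by masking the qstr hash of its name with
 * LL_SA_CACHE_MASK.
 */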
104 static inline int ll_sa_entry_hash(int val)
106 return val & LL_SA_CACHE_MASK;
110 * Insert entry into the SA hash table.
113 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
115 int i = ll_sa_entry_hash(entry->se_qstr.hash);
117 spin_lock(&sai->sai_cache_lock[i]);
118 list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
119 spin_unlock(&sai->sai_cache_lock[i]);
123 * Remove entry from SA table.
126 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
128 int i = ll_sa_entry_hash(entry->se_qstr.hash);
130 spin_lock(&sai->sai_cache_lock[i]);
131 list_del_init(&entry->se_hash);
132 spin_unlock(&sai->sai_cache_lock[i]);
135 static inline int agl_should_run(struct ll_statahead_info *sai,
138 return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
141 static inline struct ll_sa_entry *
142 sa_first_received_entry(struct ll_statahead_info *sai)
144 return list_entry(sai->sai_entries_received.next,
145 struct ll_sa_entry, se_list);
148 static inline struct ll_inode_info *
149 agl_first_entry(struct ll_statahead_info *sai)
151 return list_entry(sai->sai_entries_agl.next,
152 struct ll_inode_info, lli_agl_list);
155 static inline int sa_sent_full(struct ll_statahead_info *sai)
157 return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
160 static inline int sa_received_empty(struct ll_statahead_info *sai)
162 return list_empty(&sai->sai_entries_received);
165 static inline int agl_list_empty(struct ll_statahead_info *sai)
167 return list_empty(&sai->sai_entries_agl);
171 * (1) the hit ratio is less than 80%, or
173 * (2) there are more than 8 consecutive misses;
174 * either case means the hit rate is low.
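* ("hit < 4 * miss" is equivalent to "hit / (hit + miss) < 80%"; the "hit > 7"
* guard just avoids reacting before there are enough samples.)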
176 static inline int sa_low_hit(struct ll_statahead_info *sai)
178 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
179 (sai->sai_consecutive_miss > 8));
183 * If the given index is behind the statahead window by more than
184 * SA_OMITTED_ENTRY_MAX, then it is old.
186 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
188 return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
193 * Insert it at the tail of sai_entries at initialization time.
195 static struct ll_sa_entry *
196 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
197 const char *name, int len)
199 struct ll_inode_info *lli;
200 struct ll_sa_entry *entry;
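/* The entry and the name are allocated together: the name is stored right
 * after the struct, with its buffer rounded up to a multiple of 4 bytes so
 * that (with kzalloc) it is always NUL-terminated. */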
204 entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
205 entry = kzalloc(entry_size, GFP_NOFS);
206 if (unlikely(!entry))
207 return ERR_PTR(-ENOMEM);
209 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
210 len, name, entry, index);
212 entry->se_index = index;
215 * Statahead entry reference rules:
217 * 1) When the statahead entry is initialized, its reference count is set to 2.
218 * One reference is used by the directory scanner. When the scanner
219 * searches the statahead cache for the given name, it can perform a
220 * lockless hash lookup (only the scanner can remove the entry from the
221 * hash list), and once found, it need not call "atomic_inc()" on the
222 * entry reference. So the performance is improved. After using the
223 * statahead entry, the scanner calls "atomic_dec()" to drop the
224 * reference taken at initialization. If it is the last reference,
225 * the statahead entry is freed.
227 * 2) All other threads, including the statahead thread and ptlrpcd threads,
228 * must hold a reference on the statahead entry while processing it, to
229 * guarantee that it will not be released by the
230 * directory scanner. After processing the entry, these threads
231 * drop the entry reference; the last reference frees the entry.
234 * The second reference, taken when the statahead entry is initialized, is
235 * used by the statahead thread, following rule 2).
237 atomic_set(&entry->se_refcount, 2);
238 entry->se_stat = SA_ENTRY_INIT;
239 entry->se_size = entry_size;
240 dname = (char *)entry + sizeof(struct ll_sa_entry);
241 memcpy(dname, name, len);
243 entry->se_qstr.hash = full_name_hash(name, len);
244 entry->se_qstr.len = len;
245 entry->se_qstr.name = dname;
247 lli = ll_i2info(sai->sai_inode);
248 spin_lock(&lli->lli_sa_lock);
249 list_add_tail(&entry->se_link, &sai->sai_entries);
250 INIT_LIST_HEAD(&entry->se_list);
251 ll_sa_entry_enhash(sai, entry);
252 spin_unlock(&lli->lli_sa_lock);
254 atomic_inc(&sai->sai_cache_count);
260 * Used by the directory scanner to look up an entry by name.
262 * Only the caller can remove the entry from the hash, so it is unnecessary to
263 * hold the hash lock. It is the caller's duty to release the initial refcount on
264 * the entry, so it is also unnecessary to increase the refcount on the entry.
266 static struct ll_sa_entry *
267 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
269 struct ll_sa_entry *entry;
270 int i = ll_sa_entry_hash(qstr->hash);
272 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
273 if (entry->se_qstr.hash == qstr->hash &&
274 entry->se_qstr.len == qstr->len &&
275 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
282 * Used by the async getattr request callback to find the entry by index.
284 * Called under lli_sa_lock to prevent others from changing the list during
285 * the search. The entry refcount is increased before returning, to guarantee
286 * that the entry cannot be freed by others.
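* Entries are kept on sai_entries in ascending se_index order, so the walk
* can stop as soon as it passes the requested index.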
288 static struct ll_sa_entry *
289 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
291 struct ll_sa_entry *entry;
293 list_for_each_entry(entry, &sai->sai_entries, se_link) {
294 if (entry->se_index == index) {
295 LASSERT(atomic_read(&entry->se_refcount) > 0);
296 atomic_inc(&entry->se_refcount);
299 if (entry->se_index > index)
305 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
306 struct ll_sa_entry *entry)
308 struct md_enqueue_info *minfo = entry->se_minfo;
309 struct ptlrpc_request *req = entry->se_req;
312 entry->se_minfo = NULL;
313 ll_intent_release(&minfo->mi_it);
319 entry->se_req = NULL;
320 ptlrpc_req_finished(req);
324 static void ll_sa_entry_put(struct ll_statahead_info *sai,
325 struct ll_sa_entry *entry)
327 if (atomic_dec_and_test(&entry->se_refcount)) {
328 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
329 entry->se_qstr.len, entry->se_qstr.name, entry,
332 LASSERT(list_empty(&entry->se_link));
333 LASSERT(list_empty(&entry->se_list));
334 LASSERT(ll_sa_entry_unhashed(entry));
336 ll_sa_entry_cleanup(sai, entry);
337 iput(entry->se_inode);
339 OBD_FREE(entry, entry->se_size);
340 atomic_dec(&sai->sai_cache_count);
345 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
347 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
349 LASSERT(!ll_sa_entry_unhashed(entry));
350 LASSERT(!list_empty(&entry->se_link));
352 ll_sa_entry_unhash(sai, entry);
354 spin_lock(&lli->lli_sa_lock);
355 entry->se_stat = SA_ENTRY_DEST;
356 list_del_init(&entry->se_link);
357 if (likely(!list_empty(&entry->se_list)))
358 list_del_init(&entry->se_list);
359 spin_unlock(&lli->lli_sa_lock);
361 ll_sa_entry_put(sai, entry);
365 * Delete the entry from the sai_entries_stated list when it is finalized.
368 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
370 struct ll_sa_entry *pos, *next;
373 do_sa_entry_fini(sai, entry);
375 /* drop old entries; only the 'scanner' process does this, so no need to lock */
376 list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
377 if (!is_omitted_entry(sai, pos->se_index))
379 do_sa_entry_fini(sai, pos);
384 * Inside lli_sa_lock.
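* Insert the entry into sai_entries_stated so that the list stays sorted in
* ascending se_index order: walk backwards and link it after the last entry
* with a smaller index.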
387 do_sa_entry_to_stated(struct ll_statahead_info *sai,
388 struct ll_sa_entry *entry, se_stat_t stat)
390 struct ll_sa_entry *se;
391 struct list_head *pos = &sai->sai_entries_stated;
393 if (!list_empty(&entry->se_list))
394 list_del_init(&entry->se_list);
396 list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
397 if (se->se_index < entry->se_index) {
403 list_add(&entry->se_list, pos);
404 entry->se_stat = stat;
408 * Move entry to sai_entries_stated, keeping the list sorted by index.
409 * \retval 1 -- entry to be destroyed.
410 * \retval 0 -- entry is inserted into stated list.
413 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
414 struct ll_sa_entry *entry, se_stat_t stat)
416 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
419 ll_sa_entry_cleanup(sai, entry);
421 spin_lock(&lli->lli_sa_lock);
422 if (likely(entry->se_stat != SA_ENTRY_DEST)) {
423 do_sa_entry_to_stated(sai, entry, stat);
426 spin_unlock(&lli->lli_sa_lock);
432 * Insert inode into the list of sai_entries_agl.
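* Queue the inode for AGL (async glimpse lock): the AGL thread will later
* issue a glimpse RPC to fetch the file size from the OSTs, so a following
* stat() of the file does not have to do it synchronously.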
434 static void ll_agl_add(struct ll_statahead_info *sai,
435 struct inode *inode, int index)
437 struct ll_inode_info *child = ll_i2info(inode);
438 struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
441 spin_lock(&child->lli_agl_lock);
442 if (child->lli_agl_index == 0) {
443 child->lli_agl_index = index;
444 spin_unlock(&child->lli_agl_lock);
446 LASSERT(list_empty(&child->lli_agl_list));
449 spin_lock(&parent->lli_agl_lock);
450 if (agl_list_empty(sai))
452 list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
453 spin_unlock(&parent->lli_agl_lock);
455 spin_unlock(&child->lli_agl_lock);
459 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
462 static struct ll_statahead_info *ll_sai_alloc(void)
464 struct ll_statahead_info *sai;
467 sai = kzalloc(sizeof(*sai), GFP_NOFS);
471 atomic_set(&sai->sai_refcount, 1);
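/* Each sai gets a generation number; the async getattr callback compares it
 * with the generation recorded in its md_enqueue_info and ignores replies
 * that arrive after the sai has been torn down and replaced. */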
473 spin_lock(&sai_generation_lock);
474 sai->sai_generation = ++sai_generation;
475 if (unlikely(sai_generation == 0))
476 sai->sai_generation = ++sai_generation;
477 spin_unlock(&sai_generation_lock);
479 sai->sai_max = LL_SA_RPC_MIN;
481 init_waitqueue_head(&sai->sai_waitq);
482 init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
483 init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
485 INIT_LIST_HEAD(&sai->sai_entries);
486 INIT_LIST_HEAD(&sai->sai_entries_received);
487 INIT_LIST_HEAD(&sai->sai_entries_stated);
488 INIT_LIST_HEAD(&sai->sai_entries_agl);
490 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
491 INIT_LIST_HEAD(&sai->sai_cache[i]);
492 spin_lock_init(&sai->sai_cache_lock[i]);
494 atomic_set(&sai->sai_cache_count, 0);
499 static inline struct ll_statahead_info *
500 ll_sai_get(struct ll_statahead_info *sai)
502 atomic_inc(&sai->sai_refcount);
506 static void ll_sai_put(struct ll_statahead_info *sai)
508 struct inode *inode = sai->sai_inode;
509 struct ll_inode_info *lli = ll_i2info(inode);
511 if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
512 struct ll_sa_entry *entry, *next;
514 if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
515 /* This is a race case: the interpret callback just holds
516 * a reference count */
517 spin_unlock(&lli->lli_sa_lock);
521 LASSERT(lli->lli_opendir_key == NULL);
522 LASSERT(thread_is_stopped(&sai->sai_thread));
523 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
526 lli->lli_opendir_pid = 0;
527 spin_unlock(&lli->lli_sa_lock);
529 if (sai->sai_sent > sai->sai_replied)
530 CDEBUG(D_READA, "statahead for dir "DFID
531 " does not finish: [sent:%llu] [replied:%llu]\n",
533 sai->sai_sent, sai->sai_replied);
535 list_for_each_entry_safe(entry, next,
536 &sai->sai_entries, se_link)
537 do_sa_entry_fini(sai, entry);
539 LASSERT(list_empty(&sai->sai_entries));
540 LASSERT(sa_received_empty(sai));
541 LASSERT(list_empty(&sai->sai_entries_stated));
543 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
544 LASSERT(agl_list_empty(sai));
551 /* Do NOT forget to drop the inode refcount once it is on sai_entries_agl. */
552 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
554 struct ll_inode_info *lli = ll_i2info(inode);
555 __u64 index = lli->lli_agl_index;
558 LASSERT(list_empty(&lli->lli_agl_list));
560 /* AGL may fall behind statahead by one entry */
561 if (is_omitted_entry(sai, index + 1)) {
562 lli->lli_agl_index = 0;
567 /* Someone is in glimpse (sync or async), do nothing. */
568 rc = down_write_trylock(&lli->lli_glimpse_sem);
570 lli->lli_agl_index = 0;
576 * Someone triggered a glimpse within the last second.
577 * 1) The former glimpse succeeded with a glimpse lock granted by the OST, and
578 * if the lock is still cached on the client, AGL needs to do nothing. If
579 * it has been cancelled by another client, AGL may fail to obtain a new
580 * lock, since AGL does not trigger glimpse callbacks.
581 * 2) The former glimpse succeeded, but the OST did not grant a glimpse lock.
582 * In such a case, it is quite possible that the OST will not grant a
583 * glimpse lock for AGL either.
584 * 3) The former glimpse failed; compared with the other two cases, it is
585 * relatively rare. AGL can ignore such a case, and it will not much
586 * affect performance.
588 if (lli->lli_glimpse_time != 0 &&
589 time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
590 up_write(&lli->lli_glimpse_sem);
591 lli->lli_agl_index = 0;
596 CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
597 DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
600 lli->lli_agl_index = 0;
601 lli->lli_glimpse_time = cfs_time_current();
602 up_write(&lli->lli_glimpse_sem);
604 CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
605 DFID", idx = %llu, rc = %d\n",
606 PFID(&lli->lli_fid), index, rc);
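/*
 * Process one entry from sai_entries_received: take the reply saved by the
 * interpret callback, revalidate or instantiate the child inode, optionally
 * queue it for AGL, and finally move the entry to the stated list.
 */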
611 static void ll_post_statahead(struct ll_statahead_info *sai)
613 struct inode *dir = sai->sai_inode;
615 struct ll_inode_info *lli = ll_i2info(dir);
616 struct ll_sa_entry *entry;
617 struct md_enqueue_info *minfo;
618 struct lookup_intent *it;
619 struct ptlrpc_request *req;
620 struct mdt_body *body;
623 spin_lock(&lli->lli_sa_lock);
624 if (unlikely(sa_received_empty(sai))) {
625 spin_unlock(&lli->lli_sa_lock);
628 entry = sa_first_received_entry(sai);
629 atomic_inc(&entry->se_refcount);
630 list_del_init(&entry->se_list);
631 spin_unlock(&lli->lli_sa_lock);
633 LASSERT(entry->se_handle != 0);
635 minfo = entry->se_minfo;
638 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
644 child = entry->se_inode;
649 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
651 /* XXX: No fid in the reply, this is probably a cross-ref case.
652 * SA can't handle it yet. */
653 if (body->valid & OBD_MD_MDS) {
661 /* unlinked and re-created with the same name */
662 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){
663 entry->se_inode = NULL;
669 it->d.lustre.it_lock_handle = entry->se_handle;
670 rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
676 rc = ll_prep_inode(&child, req, dir->i_sb, it);
680 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
681 child, child->i_ino, child->i_generation);
682 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
684 entry->se_inode = child;
686 if (agl_should_run(sai, child))
687 ll_agl_add(sai, child, entry->se_index);
690 /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
691 * reference count by calling "ll_intent_drop_lock()" in spite of the
692 * above operations failed or not. Do not worry about calling
693 * "ll_intent_drop_lock()" more than once. */
694 rc = ll_sa_entry_to_stated(sai, entry,
695 rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
696 if (rc == 0 && entry->se_index == sai->sai_index_wait)
697 wake_up(&sai->sai_waitq);
698 ll_sa_entry_put(sai, entry);
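/*
 * Completion callback of the async getattr RPC (normally run by a ptlrpcd
 * thread). It only records the intent and request on the entry, queues the
 * entry on sai_entries_received and wakes the statahead thread; the actual
 * inode work is done later in ll_post_statahead().
 */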
701 static int ll_statahead_interpret(struct ptlrpc_request *req,
702 struct md_enqueue_info *minfo, int rc)
704 struct lookup_intent *it = &minfo->mi_it;
705 struct inode *dir = minfo->mi_dir;
706 struct ll_inode_info *lli = ll_i2info(dir);
707 struct ll_statahead_info *sai = NULL;
708 struct ll_sa_entry *entry;
711 if (it_disposition(it, DISP_LOOKUP_NEG))
714 spin_lock(&lli->lli_sa_lock);
716 if (unlikely(lli->lli_sai == NULL ||
717 lli->lli_sai->sai_generation != minfo->mi_generation)) {
718 spin_unlock(&lli->lli_sa_lock);
722 sai = ll_sai_get(lli->lli_sai);
723 if (unlikely(!thread_is_running(&sai->sai_thread))) {
725 spin_unlock(&lli->lli_sa_lock);
730 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
733 spin_unlock(&lli->lli_sa_lock);
739 do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
740 wakeup = (entry->se_index == sai->sai_index_wait);
742 entry->se_minfo = minfo;
743 entry->se_req = ptlrpc_request_addref(req);
744 /* Release the async ibits lock ASAP to avoid deadlock
745 * when the statahead thread tries to enqueue a lock on the parent
746 * for readpage while another thread tries to enqueue a lock on a child
747 * with the parent's lock held, for example: unlink. */
748 entry->se_handle = it->d.lustre.it_lock_handle;
749 ll_intent_drop_lock(it);
750 wakeup = sa_received_empty(sai);
751 list_add_tail(&entry->se_list,
752 &sai->sai_entries_received);
755 spin_unlock(&lli->lli_sa_lock);
757 ll_sa_entry_put(sai, entry);
759 wake_up(&sai->sai_thread.t_ctl_waitq);
764 ll_intent_release(it);
773 static void sa_args_fini(struct md_enqueue_info *minfo,
774 struct ldlm_enqueue_info *einfo)
776 LASSERT(minfo && einfo);
778 capa_put(minfo->mi_data.op_capa1);
779 capa_put(minfo->mi_data.op_capa2);
785 * There is a race condition between "capa_put" and "ll_statahead_interpret"
786 * when accessing "op_data.op_capa[1,2]", as follows:
787 * "capa_put" drops the reference on "op_data.op_capa[1,2]" after calling
788 * "md_intent_getattr_async". But "ll_statahead_interpret" may run first and
789 * fill "op_data.op_capa[1,2]" with POISON, which would make "capa_put" access
790 * an invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" before
791 * calling "md_intent_getattr_async".
793 static int sa_args_init(struct inode *dir, struct inode *child,
794 struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
795 struct ldlm_enqueue_info **pei,
796 struct obd_capa **pcapa)
798 struct qstr *qstr = &entry->se_qstr;
799 struct ll_inode_info *lli = ll_i2info(dir);
800 struct md_enqueue_info *minfo;
801 struct ldlm_enqueue_info *einfo;
802 struct md_op_data *op_data;
804 einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
808 minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
814 op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
815 qstr->len, 0, LUSTRE_OPC_ANY, NULL);
816 if (IS_ERR(op_data)) {
819 return PTR_ERR(op_data);
822 minfo->mi_it.it_op = IT_GETATTR;
823 minfo->mi_dir = igrab(dir);
824 minfo->mi_cb = ll_statahead_interpret;
825 minfo->mi_generation = lli->lli_sai->sai_generation;
826 minfo->mi_cbdata = entry->se_index;
828 einfo->ei_type = LDLM_IBITS;
829 einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
830 einfo->ei_cb_bl = ll_md_blocking_ast;
831 einfo->ei_cb_cp = ldlm_completion_ast;
832 einfo->ei_cb_gl = NULL;
833 einfo->ei_cbdata = NULL;
837 pcapa[0] = op_data->op_capa1;
838 pcapa[1] = op_data->op_capa2;
843 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
845 struct md_enqueue_info *minfo;
846 struct ldlm_enqueue_info *einfo;
847 struct obd_capa *capas[2];
850 rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
854 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
859 sa_args_fini(minfo, einfo);
866 * similar to ll_revalidate_it().
867 * \retval 1 -- dentry valid
868 * \retval 0 -- will send stat-ahead request
869 * \retval others -- prepare stat-ahead request failed
871 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
872 struct dentry *dentry)
874 struct inode *inode = dentry->d_inode;
875 struct lookup_intent it = { .it_op = IT_GETATTR,
876 .d.lustre.it_lock_handle = 0 };
877 struct md_enqueue_info *minfo;
878 struct ldlm_enqueue_info *einfo;
879 struct obd_capa *capas[2];
882 if (unlikely(inode == NULL))
885 if (d_mountpoint(dentry))
888 entry->se_inode = igrab(inode);
889 rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
892 entry->se_handle = it.d.lustre.it_lock_handle;
893 ll_intent_release(&it);
897 rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
899 entry->se_inode = NULL;
904 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
909 entry->se_inode = NULL;
911 sa_args_fini(minfo, einfo);
917 static void ll_statahead_one(struct dentry *parent, const char *entry_name,
920 struct inode *dir = parent->d_inode;
921 struct ll_inode_info *lli = ll_i2info(dir);
922 struct ll_statahead_info *sai = lli->lli_sai;
923 struct dentry *dentry = NULL;
924 struct ll_sa_entry *entry;
928 entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
933 dentry = d_lookup(parent, &entry->se_qstr);
935 rc = do_sa_lookup(dir, entry);
937 rc = do_sa_revalidate(dir, entry, dentry);
938 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
939 ll_agl_add(sai, dentry->d_inode, entry->se_index);
946 rc1 = ll_sa_entry_to_stated(sai, entry,
947 rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
948 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
949 wake_up(&sai->sai_waitq);
955 /* drop the refcount on the entry taken in ll_sa_entry_alloc */
956 ll_sa_entry_put(sai, entry);
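/*
 * The AGL thread: wait for inodes queued on sai_entries_agl by the statahead
 * thread and trigger an async glimpse for each of them; on exit, drop the
 * inodes still left on the list.
 */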
959 static int ll_agl_thread(void *arg)
961 struct dentry *parent = (struct dentry *)arg;
962 struct inode *dir = parent->d_inode;
963 struct ll_inode_info *plli = ll_i2info(dir);
964 struct ll_inode_info *clli;
965 struct ll_sb_info *sbi = ll_i2sbi(dir);
966 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
967 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
968 struct l_wait_info lwi = { 0 };
970 thread->t_pid = current_pid();
971 CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
974 atomic_inc(&sbi->ll_agl_total);
975 spin_lock(&plli->lli_agl_lock);
976 sai->sai_agl_valid = 1;
977 if (thread_is_init(thread))
978 /* If someone else has changed the thread state
979 * (e.g. already changed to SVC_STOPPING), we can't just
980 * blindly overwrite that setting. */
981 thread_set_flags(thread, SVC_RUNNING);
982 spin_unlock(&plli->lli_agl_lock);
983 wake_up(&thread->t_ctl_waitq);
986 l_wait_event(thread->t_ctl_waitq,
987 !agl_list_empty(sai) ||
988 !thread_is_running(thread),
991 if (!thread_is_running(thread))
994 spin_lock(&plli->lli_agl_lock);
995 /* The statahead thread may help process AGL entries,
996 * so check again whether the list is empty. */
997 if (!agl_list_empty(sai)) {
998 clli = agl_first_entry(sai);
999 list_del_init(&clli->lli_agl_list);
1000 spin_unlock(&plli->lli_agl_lock);
1001 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1003 spin_unlock(&plli->lli_agl_lock);
1007 spin_lock(&plli->lli_agl_lock);
1008 sai->sai_agl_valid = 0;
1009 while (!agl_list_empty(sai)) {
1010 clli = agl_first_entry(sai);
1011 list_del_init(&clli->lli_agl_list);
1012 spin_unlock(&plli->lli_agl_lock);
1013 clli->lli_agl_index = 0;
1014 iput(&clli->lli_vfs_inode);
1015 spin_lock(&plli->lli_agl_lock);
1017 thread_set_flags(thread, SVC_STOPPED);
1018 spin_unlock(&plli->lli_agl_lock);
1019 wake_up(&thread->t_ctl_waitq);
1021 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
1026 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1028 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1029 struct l_wait_info lwi = { 0 };
1030 struct ll_inode_info *plli;
1031 struct task_struct *task;
1033 CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1036 plli = ll_i2info(parent->d_inode);
1037 task = kthread_run(ll_agl_thread, parent,
1038 "ll_agl_%u", plli->lli_opendir_pid);
1040 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1041 thread_set_flags(thread, SVC_STOPPED);
1045 l_wait_event(thread->t_ctl_waitq,
1046 thread_is_running(thread) || thread_is_stopped(thread),
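/*
 * The statahead thread: iterate over the directory pages, start an async
 * getattr for each name ahead of the scanning process (throttled by
 * sai_max), drain replies via ll_post_statahead(), and help trigger AGL
 * entries while the statahead window is full.
 */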
1050 static int ll_statahead_thread(void *arg)
1052 struct dentry *parent = (struct dentry *)arg;
1053 struct inode *dir = parent->d_inode;
1054 struct ll_inode_info *plli = ll_i2info(dir);
1055 struct ll_inode_info *clli;
1056 struct ll_sb_info *sbi = ll_i2sbi(dir);
1057 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
1058 struct ptlrpc_thread *thread = &sai->sai_thread;
1059 struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1064 struct ll_dir_chain chain;
1065 struct l_wait_info lwi = { 0 };
1067 thread->t_pid = current_pid();
1068 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1071 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1072 ll_start_agl(parent, sai);
1074 atomic_inc(&sbi->ll_sa_total);
1075 spin_lock(&plli->lli_sa_lock);
1076 if (thread_is_init(thread))
1077 /* If someone else has changed the thread state
1078 * (e.g. already changed to SVC_STOPPING), we can't just
1079 * blindly overwrite that setting. */
1080 thread_set_flags(thread, SVC_RUNNING);
1081 spin_unlock(&plli->lli_sa_lock);
1082 wake_up(&thread->t_ctl_waitq);
1084 ll_dir_chain_init(&chain);
1085 page = ll_get_dir_page(dir, pos, &chain);
1088 struct lu_dirpage *dp;
1089 struct lu_dirent *ent;
1093 CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
1094 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1095 rc, plli->lli_opendir_pid);
1099 dp = page_address(page);
1100 for (ent = lu_dirent_start(dp); ent != NULL;
1101 ent = lu_dirent_next(ent)) {
1106 hash = le64_to_cpu(ent->lde_hash);
1107 if (unlikely(hash < pos))
1109 * Skip until we find target hash value.
1113 namelen = le16_to_cpu(ent->lde_namelen);
1114 if (unlikely(namelen == 0))
1116 * Skip dummy record.
1120 name = ent->lde_name;
1121 if (name[0] == '.') {
1127 } else if (name[1] == '.' && namelen == 2) {
1132 } else if (!sai->sai_ls_all) {
1134 * skip hidden files.
1136 sai->sai_skip_hidden++;
1142 * don't stat-ahead first entry.
1144 if (unlikely(++first == 1))
1148 l_wait_event(thread->t_ctl_waitq,
1149 !sa_sent_full(sai) ||
1150 !sa_received_empty(sai) ||
1151 !agl_list_empty(sai) ||
1152 !thread_is_running(thread),
1156 while (!sa_received_empty(sai))
1157 ll_post_statahead(sai);
1159 if (unlikely(!thread_is_running(thread))) {
1160 ll_release_page(page, 0);
1165 /* If there is no window for metadata statahead, but there are
1166 * some AGL entries to be triggered, then try to help
1167 * process the AGL entries. */
1168 if (sa_sent_full(sai)) {
1169 spin_lock(&plli->lli_agl_lock);
1170 while (!agl_list_empty(sai)) {
1171 clli = agl_first_entry(sai);
1172 list_del_init(&clli->lli_agl_list);
1173 spin_unlock(&plli->lli_agl_lock);
1174 ll_agl_trigger(&clli->lli_vfs_inode,
1177 if (!sa_received_empty(sai))
1181 !thread_is_running(thread))) {
1182 ll_release_page(page, 0);
1187 if (!sa_sent_full(sai))
1190 spin_lock(&plli->lli_agl_lock);
1192 spin_unlock(&plli->lli_agl_lock);
1198 ll_statahead_one(parent, name, namelen);
1200 pos = le64_to_cpu(dp->ldp_hash_end);
1201 if (pos == MDS_DIR_END_OFF) {
1203 * End of directory reached.
1205 ll_release_page(page, 0);
1207 l_wait_event(thread->t_ctl_waitq,
1208 !sa_received_empty(sai) ||
1209 sai->sai_sent == sai->sai_replied||
1210 !thread_is_running(thread),
1213 while (!sa_received_empty(sai))
1214 ll_post_statahead(sai);
1216 if (unlikely(!thread_is_running(thread))) {
1221 if (sai->sai_sent == sai->sai_replied &&
1222 sa_received_empty(sai))
1226 spin_lock(&plli->lli_agl_lock);
1227 while (!agl_list_empty(sai) &&
1228 thread_is_running(thread)) {
1229 clli = agl_first_entry(sai);
1230 list_del_init(&clli->lli_agl_list);
1231 spin_unlock(&plli->lli_agl_lock);
1232 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1233 spin_lock(&plli->lli_agl_lock);
1235 spin_unlock(&plli->lli_agl_lock);
1241 * chain is exhausted.
1242 * Normal case: continue to the next page.
1244 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1246 page = ll_get_dir_page(dir, pos, &chain);
1248 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1249 ll_release_page(page, 1);
1251 * go into overflow page.
1257 if (sai->sai_agl_valid) {
1258 spin_lock(&plli->lli_agl_lock);
1259 thread_set_flags(agl_thread, SVC_STOPPING);
1260 spin_unlock(&plli->lli_agl_lock);
1261 wake_up(&agl_thread->t_ctl_waitq);
1263 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1264 sai, (unsigned int)agl_thread->t_pid);
1265 l_wait_event(agl_thread->t_ctl_waitq,
1266 thread_is_stopped(agl_thread),
1269 /* Set agl_thread flags anyway. */
1270 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1272 ll_dir_chain_fini(&chain);
1273 spin_lock(&plli->lli_sa_lock);
1274 if (!sa_received_empty(sai)) {
1275 thread_set_flags(thread, SVC_STOPPING);
1276 spin_unlock(&plli->lli_sa_lock);
1278 /* To release the resources held by received entries. */
1279 while (!sa_received_empty(sai))
1280 ll_post_statahead(sai);
1282 spin_lock(&plli->lli_sa_lock);
1284 thread_set_flags(thread, SVC_STOPPED);
1285 spin_unlock(&plli->lli_sa_lock);
1286 wake_up(&sai->sai_waitq);
1287 wake_up(&thread->t_ctl_waitq);
1290 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
1296 * called in ll_file_release().
1298 void ll_stop_statahead(struct inode *dir, void *key)
1300 struct ll_inode_info *lli = ll_i2info(dir);
1302 if (unlikely(key == NULL))
1305 spin_lock(&lli->lli_sa_lock);
1306 if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1307 spin_unlock(&lli->lli_sa_lock);
1311 lli->lli_opendir_key = NULL;
1314 struct l_wait_info lwi = { 0 };
1315 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1317 if (!thread_is_stopped(thread)) {
1318 thread_set_flags(thread, SVC_STOPPING);
1319 spin_unlock(&lli->lli_sa_lock);
1320 wake_up(&thread->t_ctl_waitq);
1322 CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
1323 lli->lli_sai, (unsigned int)thread->t_pid);
1324 l_wait_event(thread->t_ctl_waitq,
1325 thread_is_stopped(thread),
1328 spin_unlock(&lli->lli_sa_lock);
1332 * Put the reference which was taken at the first statahead_enter.
1333 * It may not be the last reference, as some statahead requests may still be in flight.
1336 ll_sai_put(lli->lli_sai);
1338 lli->lli_opendir_pid = 0;
1339 spin_unlock(&lli->lli_sa_lock);
1345 * not first dirent, or is "."
1347 LS_NONE_FIRST_DE = 0,
1349 * the first non-hidden dirent
1353 * the first hidden dirent, that is "."
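/*
 * Check whether the dentry being looked up is the first dirent of the
 * directory (skipping "." and ".."): statahead is only started when the
 * scan begins at the head of the directory.
 */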
1358 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1360 struct ll_dir_chain chain;
1361 struct qstr *target = &dentry->d_name;
1365 int rc = LS_NONE_FIRST_DE;
1367 ll_dir_chain_init(&chain);
1368 page = ll_get_dir_page(dir, pos, &chain);
1371 struct lu_dirpage *dp;
1372 struct lu_dirent *ent;
1375 struct ll_inode_info *lli = ll_i2info(dir);
1378 CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
1379 PFID(ll_inode2fid(dir)), pos,
1380 rc, lli->lli_opendir_pid);
1384 dp = page_address(page);
1385 for (ent = lu_dirent_start(dp); ent != NULL;
1386 ent = lu_dirent_next(ent)) {
1391 hash = le64_to_cpu(ent->lde_hash);
1392 /* ll_get_dir_page() can return any page containing
1393 * the given hash, which may not be the start hash. */
1394 if (unlikely(hash < pos))
1397 namelen = le16_to_cpu(ent->lde_namelen);
1398 if (unlikely(namelen == 0))
1400 * skip dummy record.
1404 name = ent->lde_name;
1405 if (name[0] == '.') {
1411 else if (name[1] == '.' && namelen == 2)
1422 if (dot_de && target->name[0] != '.') {
1423 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1424 target->len, target->name,
1429 if (target->len != namelen ||
1430 memcmp(target->name, name, namelen) != 0)
1431 rc = LS_NONE_FIRST_DE;
1435 rc = LS_FIRST_DOT_DE;
1437 ll_release_page(page, 0);
1440 pos = le64_to_cpu(dp->ldp_hash_end);
1441 if (pos == MDS_DIR_END_OFF) {
1443 * End of directory reached.
1445 ll_release_page(page, 0);
1449 * chain is exhausted
1450 * Normal case: continue to the next page.
1452 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1454 page = ll_get_dir_page(dir, pos, &chain);
1457 * go into overflow page.
1459 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1460 ll_release_page(page, 1);
1465 ll_dir_chain_fini(&chain);
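/*
 * Called once the scanner is done with a statahead entry: update the
 * hit/miss statistics, grow the statahead window on a hit, and ask the
 * statahead thread to stop if the hit ratio gets too low.
 */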
1470 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1472 struct ptlrpc_thread *thread = &sai->sai_thread;
1473 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
1476 if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
1481 ll_sa_entry_fini(sai, entry);
1484 sai->sai_consecutive_miss = 0;
1485 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1487 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1490 sai->sai_consecutive_miss++;
1491 if (sa_low_hit(sai) && thread_is_running(thread)) {
1492 atomic_inc(&sbi->ll_sa_wrong);
1493 CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
1494 PFID(&lli->lli_fid), sai->sai_hit,
1495 sai->sai_miss, sai->sai_sent,
1497 spin_lock(&lli->lli_sa_lock);
1498 if (!thread_is_stopped(thread))
1499 thread_set_flags(thread, SVC_STOPPING);
1500 spin_unlock(&lli->lli_sa_lock);
1504 if (!thread_is_stopped(thread))
1505 wake_up(&thread->t_ctl_waitq);
1509 * Start the statahead thread if this is the first dir entry.
1510 * Otherwise, if a thread has already been started, wait until it is ahead of me.
1511 * \retval 1 -- found the entry with a lock in the cache; the caller needs to do nothing more.
1513 * \retval 0 -- found the entry in the cache but without a lock; the caller needs to refresh it from the MDS.
1515 * \retval others -- the caller needs to process the lookup as non-statahead.
1517 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1520 struct ll_inode_info *lli = ll_i2info(dir);
1521 struct ll_statahead_info *sai = lli->lli_sai;
1522 struct dentry *parent;
1523 struct ll_sa_entry *entry;
1524 struct ptlrpc_thread *thread;
1525 struct l_wait_info lwi = { 0 };
1527 struct ll_inode_info *plli;
1529 LASSERT(lli->lli_opendir_pid == current_pid());
1532 thread = &sai->sai_thread;
1533 if (unlikely(thread_is_stopped(thread) &&
1534 list_empty(&sai->sai_entries_stated))) {
1535 /* to release resource */
1536 ll_stop_statahead(dir, lli->lli_opendir_key);
1540 if ((*dentryp)->d_name.name[0] == '.') {
1541 if (sai->sai_ls_all ||
1542 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1544 * The hidden dentry is the first one, or the statahead
1545 * thread did not skip that many hidden dentries
1546 * before "sai_ls_all" was enabled as below.
1549 if (!sai->sai_ls_all)
1551 * It may be because the hidden dentry is not
1552 * the first one and "sai_ls_all" was not
1553 * set, so "ls -al" missed it. Enable
1554 * "sai_ls_all" for such a case.
1556 sai->sai_ls_all = 1;
1559 * Such "getattr" has been skipped before
1560 * "sai_ls_all" enabled as above.
1562 sai->sai_miss_hidden++;
1567 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1568 if (entry == NULL || only_unplug) {
1569 ll_sai_unplug(sai, entry);
1570 return entry ? 1 : -EAGAIN;
1573 if (!ll_sa_entry_stated(entry)) {
1574 sai->sai_index_wait = entry->se_index;
1575 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1576 LWI_ON_SIGNAL_NOOP, NULL);
1577 rc = l_wait_event(sai->sai_waitq,
1578 ll_sa_entry_stated(entry) ||
1579 thread_is_stopped(thread),
1582 ll_sai_unplug(sai, entry);
1587 if (entry->se_stat == SA_ENTRY_SUCC &&
1588 entry->se_inode != NULL) {
1589 struct inode *inode = entry->se_inode;
1590 struct lookup_intent it = { .it_op = IT_GETATTR,
1591 .d.lustre.it_lock_handle =
1595 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1596 ll_inode2fid(inode), &bits);
1598 if ((*dentryp)->d_inode == NULL) {
1599 struct dentry *alias;
1601 alias = ll_splice_alias(inode,
1603 if (IS_ERR(alias)) {
1604 ll_sai_unplug(sai, entry);
1605 return PTR_ERR(alias);
1608 } else if ((*dentryp)->d_inode != inode) {
1609 /* revalidate, but inode is recreated */
1611 "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
1613 (*dentryp)->d_inode->i_ino,
1614 (*dentryp)->d_inode->i_generation,
1616 inode->i_generation);
1617 ll_sai_unplug(sai, entry);
1622 entry->se_inode = NULL;
1624 if ((bits & MDS_INODELOCK_LOOKUP) &&
1625 d_lustre_invalid(*dentryp))
1626 d_lustre_revalidate(*dentryp);
1627 ll_intent_release(&it);
1631 ll_sai_unplug(sai, entry);
1635 /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1636 rc = is_first_dirent(dir, *dentryp);
1637 if (rc == LS_NONE_FIRST_DE) {
1638 /* It is not an "ls -{a}l" operation, so there is no need to statahead for it. */
1643 sai = ll_sai_alloc();
1649 sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1650 sai->sai_inode = igrab(dir);
1651 if (unlikely(sai->sai_inode == NULL)) {
1652 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1653 PFID(&lli->lli_fid));
1658 /* take a reference on the parent dentry here; it is released in ll_statahead_thread */
1659 parent = dget((*dentryp)->d_parent);
1660 if (unlikely(sai->sai_inode != parent->d_inode)) {
1661 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1663 CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
1665 PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1667 iput(sai->sai_inode);
1672 CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
1675 /* The sai buffer already has one reference taken at allocation time,
1676 * but as soon as we expose the sai by attaching it to the lli that
1677 * default reference can be dropped by another thread calling
1678 * ll_stop_statahead. We need to take a local reference to protect
1679 * the sai buffer while we intend to access it. */
1683 plli = ll_i2info(parent->d_inode);
1684 rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
1685 "ll_sa_%u", plli->lli_opendir_pid));
1686 thread = &sai->sai_thread;
1687 if (IS_ERR_VALUE(rc)) {
1688 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1690 lli->lli_opendir_key = NULL;
1691 thread_set_flags(thread, SVC_STOPPED);
1692 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1693 /* Drop both our own local reference and the default
1694 * reference from allocation time. */
1697 LASSERT(lli->lli_sai == NULL);
1701 l_wait_event(thread->t_ctl_waitq,
1702 thread_is_running(thread) || thread_is_stopped(thread),
1707 * We don't stat-ahead for the first dirent since we are already in lookup.
1715 spin_lock(&lli->lli_sa_lock);
1716 lli->lli_opendir_key = NULL;
1717 lli->lli_opendir_pid = 0;
1718 spin_unlock(&lli->lli_sa_lock);