#define UID_STATE_TOTAL_CURR 2
#define UID_STATE_TOTAL_LAST 3
-#define UID_STATE_SIZE 4
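+/*
+ * I/O accumulated from tasks that exited since the last stats update;
+ * folded into the owning uid's state bucket and cleared on refresh.
+ */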
+#define UID_STATE_DEAD_TASKS 4
+#define UID_STATE_SIZE 5
struct uid_entry {
uid_t uid;
return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}
-static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
- struct task_struct *task)
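+/* Accumulate a task's I/O counters into the given slot of a uid_entry. */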
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+ struct task_struct *task, int slot)
{
- struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+ struct io_stats *io_slot = &uid_entry->io[slot];
- io_curr->read_bytes += task->ioac.read_bytes;
- io_curr->write_bytes += compute_write_bytes(task);
- io_curr->rchar += task->ioac.rchar;
- io_curr->wchar += task->ioac.wchar;
- io_curr->fsync += task->ioac.syscfs;
+ io_slot->read_bytes += task->ioac.read_bytes;
+ io_slot->write_bytes += compute_write_bytes(task);
+ io_slot->rchar += task->ioac.rchar;
+ io_slot->wchar += task->ioac.wchar;
+ io_slot->fsync += task->ioac.syscfs;
}
-static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
- struct task_struct *task)
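+/*
+ * Fold the change since the last update, plus I/O from tasks that have
+ * since exited, into the uid's current state bucket, then snapshot the
+ * new totals and clear the dead-task counters.
+ */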
+static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
+ struct io_stats *io_curr,
+ struct io_stats *io_last,
+ struct io_stats *io_dead)
{
- struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+ io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+ io_last->read_bytes;
+ io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+ io_last->write_bytes;
+ io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
+ io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
+ io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
- io_last->read_bytes -= task->ioac.read_bytes;
- io_last->write_bytes -= compute_write_bytes(task);
- io_last->rchar -= task->ioac.rchar;
- io_last->wchar -= task->ioac.wchar;
- io_last->fsync -= task->ioac.syscfs;
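+	/* Snapshot the totals just charged so the next update only adds new I/O. */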
+ io_last->read_bytes = io_curr->read_bytes;
+ io_last->write_bytes = io_curr->write_bytes;
+ io_last->rchar = io_curr->rchar;
+ io_last->wchar = io_curr->wchar;
+ io_last->fsync = io_curr->fsync;
+
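+	/* Dead-task I/O has now been charged to the bucket; reset the slot. */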
+ memset(io_dead, 0, sizeof(struct io_stats));
}
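+/*
+ * Rebuild current I/O totals from all live threads and fold each uid's
+ * delta into its state bucket.
+ */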
static void update_io_stats_all_locked(void)
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
- struct io_stats *io_bucket, *io_curr, *io_last;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
uid_t uid;
uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
- add_uid_io_curr_stats(uid_entry, task);
+ add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
- io_bucket = &uid_entry->io[uid_entry->state];
- io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
- io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
- io_bucket->read_bytes +=
- io_curr->read_bytes - io_last->read_bytes;
- io_bucket->write_bytes +=
- io_curr->write_bytes - io_last->write_bytes;
- io_bucket->rchar += io_curr->rchar - io_last->rchar;
- io_bucket->wchar += io_curr->wchar - io_last->wchar;
- io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
- io_last->read_bytes = io_curr->read_bytes;
- io_last->write_bytes = io_curr->write_bytes;
- io_last->rchar = io_curr->rchar;
- io_last->wchar = io_curr->wchar;
- io_last->fsync = io_curr->fsync;
+ compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+ &uid_entry->io[UID_STATE_TOTAL_CURR],
+ &uid_entry->io[UID_STATE_TOTAL_LAST],
+ &uid_entry->io[UID_STATE_DEAD_TASKS]);
}
}
-static void update_io_stats_uid_locked(uid_t target_uid)
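+/*
+ * Rebuild the current I/O totals for a single uid from its live threads
+ * and fold the delta into that uid's state bucket.
+ */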
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
- struct uid_entry *uid_entry;
struct task_struct *task, *temp;
- struct io_stats *io_bucket, *io_curr, *io_last;
struct user_namespace *user_ns = current_user_ns();
- uid_entry = find_or_register_uid(target_uid);
- if (!uid_entry)
- return;
-
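+	/* Start the current totals from zero; only live threads are counted below. */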
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
rcu_read_lock();
do_each_thread(temp, task) {
- if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+ if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
continue;
- add_uid_io_curr_stats(uid_entry, task);
+ add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
} while_each_thread(temp, task);
rcu_read_unlock();
- io_bucket = &uid_entry->io[uid_entry->state];
- io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
- io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
- io_bucket->read_bytes +=
- io_curr->read_bytes - io_last->read_bytes;
- io_bucket->write_bytes +=
- io_curr->write_bytes - io_last->write_bytes;
- io_bucket->rchar += io_curr->rchar - io_last->rchar;
- io_bucket->wchar += io_curr->wchar - io_last->wchar;
- io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
- io_last->read_bytes = io_curr->read_bytes;
- io_last->write_bytes = io_curr->write_bytes;
- io_last->rchar = io_curr->rchar;
- io_last->wchar = io_curr->wchar;
- io_last->fsync = io_curr->fsync;
+ compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+ &uid_entry->io[UID_STATE_TOTAL_CURR],
+ &uid_entry->io[UID_STATE_TOTAL_LAST],
+ &uid_entry->io[UID_STATE_DEAD_TASKS]);
}
static int uid_io_show(struct seq_file *m, void *v)
return count;
}
- update_io_stats_uid_locked(uid);
+ update_io_stats_uid_locked(uid_entry);
uid_entry->state = state;
uid_entry->utime += utime;
uid_entry->stime += stime;
- update_io_stats_uid_locked(uid);
- clean_uid_io_last_stats(uid_entry, task);
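+	/*
+	 * Record the dying task's I/O in the dead-tasks slot; it is folded
+	 * into the uid's state bucket on the next stats update.
+	 */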
+ add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);