/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
                                                  struct device *dev,
                                                  int client_managed)
{
        struct host1x_syncpt *sp = host->syncpt;
        char *name;
        int i;

        /* find the first unused (unnamed) syncpoint */
        for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
                ;
        if (sp->dev)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
                         dev ? dev_name(dev) : NULL);
        if (!name)
                return NULL;

        sp->dev = dev;
        sp->name = name;
        sp->client_managed = client_managed;

        return sp;
}
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
        return sp->id;
}

/*
 * Updates the value sent to hardware.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
        return (u32)atomic_add_return(incrs, &sp->max_val);
}
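
/*
 * Usage sketch (illustration only, not part of this file): a client that
 * queues work typically reserves its increments up front and remembers the
 * returned value as the fence to wait on later. The count of three
 * increments and the 30 second timeout below are made-up example values.
 *
 *      u32 fence = host1x_syncpt_incr_max(sp, 3);
 *      // ...submit a job that performs three syncpoint increments...
 *      host1x_syncpt_wait(sp, fence, (long)30 * HZ, NULL);
 */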
/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        u32 i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
                host1x_hw_syncpt_restore(host, sp_base + i);
        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
        wmb();
}
/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        u32 i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
                if (host1x_syncpt_client_managed(sp_base + i))
                        host1x_hw_syncpt_load(host, sp_base + i);
                else
                        WARN_ON(!host1x_syncpt_idle(sp_base + i));
        }

        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}
/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
        u32 val;

        val = host1x_hw_syncpt_load(sp->host, sp);
        trace_host1x_syncpt_load_min(sp->id, val);

        return val;
}
/*
 * Get the current syncpoint base
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
        u32 val;

        host1x_hw_syncpt_load_wait_base(sp->host, sp);
        val = sp->base_val;

        return val;
}
/*
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
{
        host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
/*
 * Increment syncpoint value from cpu, updating cache
 */
void host1x_syncpt_incr(struct host1x_syncpt *sp)
{
        if (host1x_syncpt_client_managed(sp))
                host1x_syncpt_incr_max(sp, 1);
        host1x_syncpt_cpu_incr(sp);
}
/*
 * Updates the sync point from hardware, and returns true if syncpoint is
 * expired, false if we may need to wait
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        host1x_hw_syncpt_load(sp->host, sp);
        return host1x_syncpt_is_expired(sp, thresh);
}
/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
                       u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        struct host1x_waitlist *waiter;
        int err = 0, check_count = 0;
        u32 val;

        if (value)
                *value = 0;

        /* first check cache */
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = host1x_syncpt_load(sp);
                return 0;
        }

        /* try to read from register */
        val = host1x_hw_syncpt_load(sp->host, sp);
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = val;
                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* allocate a waiter */
        waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        err = host1x_intr_add_action(sp->host, sp->id, thresh,
                                     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
                                     &wq, waiter, &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* Caller-specified timeout may be impractically low */
        if (timeout < 0)
                timeout = LONG_MAX;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                syncpt_load_min_is_expired(sp, thresh),
                                check);
                if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
                        if (value)
                                *value = host1x_syncpt_load(sp);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                timeout -= check;
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(sp->host->dev,
                                 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
                                 current->comm, sp->id, sp->name,
                                 thresh, timeout);

                        host1x_debug_dump_syncpts(sp->host);
                        if (check_count == MAX_STUCK_CHECK_COUNT)
                                host1x_debug_dump(sp->host);
                        check_count++;
                }
        }
        host1x_intr_put_ref(sp->host, sp->id, ref);

done:
        return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
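
/*
 * Usage sketch (illustration only): waiting on a fence value with a finite
 * timeout; the two second timeout is an arbitrary example. On success the
 * current syncpoint value is returned through the last argument, and a
 * timeout surfaces as -EAGAIN.
 *
 *      u32 completed;
 *      int err = host1x_syncpt_wait(sp, fence, (long)2 * HZ, &completed);
 *      if (err == -EAGAIN)
 *              ;       // fence was not reached within the timeout
 */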
/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        u32 current_val;
        u32 future_val;

        smp_rmb();
        current_val = (u32)atomic_read(&sp->min_val);
        future_val = (u32)atomic_read(&sp->max_val);

        /* Note the use of unsigned arithmetic here (mod 1<<32).
         *
         * c = current_val = min_val = the current value of the syncpoint.
         * t = thresh                = the value we are checking
         * f = future_val  = max_val = the value c will reach when all
         *                             outstanding increments have completed.
         *
         * Note that c always chases f until it reaches f.
         *
         * Dtf = (f - t)
         * Dtc = (c - t)
         *
         * Consider all cases:
         *
         *      A) .....c..t..f.....    Dtf < Dtc       need to wait
         *      B) .....c.....f..t..    Dtf > Dtc       expired
         *      C) ..t..c.....f.....    Dtf > Dtc       expired (Dct very large)
         *
         * Any case where f==c: always expired (for any t).    Dtf == Dcf
         * Any case where t==c: always expired (for any f).    Dtf >= Dtc (because Dtc==0)
         * Any case where t==f!=c: always wait.                 Dtf < Dtc (because Dtf==0,
         *                                                      Dtc!=0)
         *
         * Other cases:
         *
         *      D) .....t..f..c.....    Dtf < Dtc       need to wait
         *      E) .....f..c..t.....    Dtf < Dtc       need to wait
         *      F) .....f..t..c.....    Dtf > Dtc       expired
         *
         * So:
         *      Dtf >= Dtc implies EXPIRED      (return true)
         *      Dtf <  Dtc implies WAIT         (return false)
         *
         * Note: If t is expired then we *cannot* wait on it. We would wait
         * forever (hang the system).
         *
         * Note: do NOT get clever and remove the -thresh from both sides. It
         * is NOT the same.
         *
         * If future value is zero, we have a client managed sync point. In that
         * case we do a direct comparison.
         */
        if (!host1x_syncpt_client_managed(sp))
                return future_val - thresh >= current_val - thresh;
        else
                return (s32)(current_val - thresh) >= 0;
}
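
/*
 * Worked example (illustration only, values made up): with the counter about
 * to wrap, c = 0xfffffff0, f = 0x00000010 (0x20 increments still outstanding)
 * and t = 0x00000008:
 *
 *      Dtf = f - t = 0x00000008
 *      Dtc = c - t = 0xffffffe8
 *
 * Dtf < Dtc, so the caller must wait, even though a naive "c >= t" comparison
 * would wrongly report the threshold as already reached.
 */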
/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
        return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}
int host1x_syncpt_init(struct host1x *host)
{
        struct host1x_syncpt *syncpt;
        int i;

        syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
                              GFP_KERNEL);
        if (!syncpt)
                return -ENOMEM;

        for (i = 0; i < host->info->nb_pts; ++i) {
                syncpt[i].id = i;
                syncpt[i].host = host;
        }

        host->syncpt = syncpt;

        host1x_syncpt_restore(host);

        /* Allocate sync point to use for clearing waits for expired fences */
        host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
        if (!host->nop_sp)
                return -ENOMEM;

        return 0;
}
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
                                            int client_managed)
{
        struct host1x *host = dev_get_drvdata(dev->parent);

        return _host1x_syncpt_alloc(host, dev, client_managed);
}
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
        if (!sp)
                return;

        kfree(sp->name);
        sp->dev = NULL;
        sp->name = NULL;
        sp->client_managed = 0;
}
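
/*
 * Usage sketch (illustration only): a client driver obtains a syncpoint at
 * probe time and returns it on teardown. "pdev" stands in for the client's
 * platform device and is not defined in this file.
 *
 *      struct host1x_syncpt *sp = host1x_syncpt_request(&pdev->dev, 0);
 *      if (!sp)
 *              return -ENOMEM;
 *      // ...use sp...
 *      host1x_syncpt_free(sp);
 */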
void host1x_syncpt_deinit(struct host1x *host)
{
        struct host1x_syncpt *sp = host->syncpt;
        int i;

        for (i = 0; i < host->info->nb_pts; i++, sp++)
                kfree(sp->name);
}
int host1x_syncpt_nb_pts(struct host1x *host)
{
        return host->info->nb_pts;
}

int host1x_syncpt_nb_bases(struct host1x *host)
{
        return host->info->nb_bases;
}

int host1x_syncpt_nb_mlocks(struct host1x *host)
{
        return host->info->nb_mlocks;
}
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
{
        if (id >= host->info->nb_pts)
                return NULL;

        return host->syncpt + id;
}
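
/*
 * Usage sketch (illustration only): translating a raw syncpoint id back into
 * its descriptor before using it, relying on the NULL return for ids that
 * are out of range.
 *
 *      struct host1x_syncpt *sp = host1x_syncpt_get(host, id);
 *      if (!sp)
 *              return -EINVAL;
 *      host1x_syncpt_incr(sp);
 */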