2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <engine/fifo.h>
26 #include <core/client.h>
27 #include <core/device.h>
28 #include <core/handle.h>
29 #include <core/notify.h>
30 #include <engine/dmaobj.h>
32 #include <nvif/class.h>
33 #include <nvif/event.h>
34 #include <nvif/unpack.h>
/* Notify ctor for the channel-create/destroy event (priv->cevent).
 * NOTE(review): only the signature is visible in this chunk — return type
 * and body are missing; presumably it validates the request size and fills
 * in *notify. Confirm against the full file. */
37 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
38 struct nvkm_notify *notify)
/* Event ops for the channel create/destroy event; only a ctor is set here
 * (no init/fini visible in this chunk). */
49 static const struct nvkm_event_func
50 nvkm_fifo_event_func = {
51 .ctor = nvkm_fifo_event_ctor,
/* Common constructor for a FIFO channel object.
 *
 * Creates the namedb-based channel object, validates and binds the
 * client-supplied push-buffer DMA object (@pushbuf handle), claims a free
 * channel id in [priv->min, priv->max) under priv->lock, computes the
 * channel's control-area address inside the given BAR, and broadcasts a
 * creation event on priv->cevent.
 *
 * NOTE(review): several original lines (error paths, remaining switch
 * cases, final return) are missing from this chunk — comments below cover
 * only what is visible.
 */
55 nvkm_fifo_channel_create_(struct nvkm_object *parent,
56 struct nvkm_object *engine,
57 struct nvkm_oclass *oclass,
58 int bar, u32 addr, u32 size, u32 pushbuf,
59 u64 engmask, int len, void **ptr)
61 struct nvkm_device *device = nv_device(engine);
62 struct nvkm_fifo *priv = (void *)engine;
63 struct nvkm_fifo_chan *chan;
64 struct nvkm_dmaeng *dmaeng;
68 /* create base object class */
69 ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
75 /* validate dma object representing push buffer */
/* Look up the client handle; the cast assumes the handle resolves to a
 * DMA object — presumably the class check below rejects anything else. */
76 chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
80 dmaeng = (void *)chan->pushdma->base.engine;
/* Only plain memory DMA classes are acceptable as push buffers. */
81 switch (chan->pushdma->base.oclass->handle) {
82 case NV_DMA_FROM_MEMORY:
83 case NV_DMA_IN_MEMORY:
/* Let the DMA engine produce the GPU-visible object for the pushbuf. */
89 ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
93 /* find a free fifo channel */
94 spin_lock_irqsave(&priv->lock, flags);
/* First empty slot in [min, max) wins; slot is claimed while the lock is
 * held so concurrent creators cannot race for the same chid. */
95 for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
96 if (!priv->channel[chan->chid]) {
97 priv->channel[chan->chid] = nv_object(chan);
101 spin_unlock_irqrestore(&priv->lock, flags);
/* Loop ran off the end without claiming a slot -> all channels in use. */
103 if (chan->chid == priv->max) {
104 nv_error(priv, "no free channels\n");
/* Per-channel control area: fixed stride of @size bytes per chid from
 * @addr within the BAR. ioremapped lazily on first rd32/wr32. */
108 chan->addr = nv_device_resource_start(device, bar) +
109 addr + size * chan->chid;
/* Tell cevent listeners a channel was created (no payload). */
111 nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
/* Common destructor for a FIFO channel: releases the chid slot under the
 * owning engine's lock, drops the pushbuf references taken at create time,
 * and tears down the namedb base object.
 * NOTE(review): lines between the declaration and the locked section are
 * missing here — presumably an iounmap of chan->user; confirm in full file. */
116 nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
118 struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
/* Free the chid slot; same lock as the allocation loop in create_. */
124 spin_lock_irqsave(&priv->lock, flags);
125 priv->channel[chan->chid] = NULL;
126 spin_unlock_irqrestore(&priv->lock, flags);
/* Drop refs in reverse order of acquisition: gpu object, then dma object. */
128 nvkm_gpuobj_ref(NULL, &chan->pushgpu);
129 nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
130 nvkm_namedb_destroy(&chan->namedb);
/* Default object dtor: thin wrapper routing the generic nvkm_object
 * destroy path into nvkm_fifo_channel_destroy(). */
134 _nvkm_fifo_channel_dtor(struct nvkm_object *object)
136 struct nvkm_fifo_chan *chan = (void *)object;
137 nvkm_fifo_channel_destroy(chan);
/* Map hook: reports the channel's control-area address/size to the caller.
 * NOTE(review): the body is not visible in this chunk — presumably it
 * stores chan->addr/chan->size into *addr/*size; confirm in full file. */
141 _nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
143 struct nvkm_fifo_chan *chan = (void *)object;
/* 32-bit read from the channel's control area at byte offset @addr.
 * The MMIO mapping is created lazily on first access. */
150 _nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
152 struct nvkm_fifo_chan *chan = (void *)object;
153 if (unlikely(!chan->user)) {
/* Lazy ioremap of the per-channel window computed at create time. */
154 chan->user = ioremap(chan->addr, chan->size);
/* WARN (not BUG) on ioremap failure; the missing line presumably bails
 * out with a fallback value — confirm in full file. */
155 if (WARN_ON_ONCE(chan->user == NULL))
/* Native-endian MMIO read (no byte swapping). */
158 return ioread32_native(chan->user + addr);
/* 32-bit write to the channel's control area at byte offset @addr.
 * Mirrors _nvkm_fifo_channel_rd32: lazy ioremap, native-endian access. */
162 _nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
164 struct nvkm_fifo_chan *chan = (void *)object;
165 if (unlikely(!chan->user)) {
166 chan->user = ioremap(chan->addr, chan->size);
/* On ioremap failure the write is presumably dropped (missing line) —
 * confirm in full file. */
167 if (WARN_ON_ONCE(chan->user == NULL))
170 iowrite32_native(data, chan->user + addr);
/* Notify ctor for the user event (fifo->uevent): the request has no
 * payload (nvif "none" variant); on success the reply size is fixed.
 * NOTE(review): the unpack prologue and return are missing from this
 * chunk — comments cover only the visible lines. */
174 nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
175 struct nvkm_notify *notify)
178 struct nvif_notify_uevent_req none;
/* Accept only the versionless empty request. */
182 if (nvif_unvers(req->none)) {
183 notify->size = sizeof(struct nvif_notify_uevent_rep);
/* Broadcast a user event to all uevent listeners; the reply carries no
 * data beyond its (zero-initialized) struct. */
192 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
194 struct nvif_notify_uevent_rep rep = {
196 nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
/* ntfy hook: resolves a notify @type to the owning fifo's event.
 * Only the UEVENT case is visible here; it is limited to G82 and newer
 * channel classes. Other cases / the default return are missing from
 * this chunk. */
200 _nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
201 struct nvkm_event **event)
203 struct nvkm_fifo *fifo = (void *)object->engine;
205 case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
/* Pre-G82 classes have no user event; class check gates access. */
206 if (nv_mclass(object) >= G82_CHANNEL_DMA) {
207 *event = &fifo->uevent;
/* Resolve an arbitrary object to the channel id that owns it: walk up the
 * parent chain looking for an engine-context whose engine index matches
 * this fifo engine, then read the chid from the containing channel.
 * NOTE(review): the not-found return value is on a missing line —
 * presumably a negative errno; confirm in full file. */
218 nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
/* Low byte of the handle class encodes the engine index. */
220 int engidx = nv_hclass(priv) & 0xff;
222 while (object && object->parent) {
223 if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
224 (nv_hclass(object->parent) & 0xff) == engidx)
225 return nvkm_fifo_chan(object)->chid;
226 object = object->parent;
/* Look up the channel occupying @chid (under the fifo lock, so the slot
 * cannot be recycled mid-read) and return its owning client's name; a
 * NULL chan is passed through to nvkm_client_name, which presumably
 * handles it — confirm in full file.
 * NOTE(review): the bound here is chid <= fifo->max while the allocator
 * loop uses chid < priv->max; verify the intended inclusive/exclusive
 * range against the full file before changing either. */
233 nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
235 struct nvkm_fifo_chan *chan = NULL;
238 spin_lock_irqsave(&fifo->lock, flags);
239 if (chid >= fifo->min && chid <= fifo->max)
240 chan = (void *)fifo->channel[chid];
241 spin_unlock_irqrestore(&fifo->lock, flags);
243 return nvkm_client_name(chan);
/* Engine destructor: frees the channel-slot array, tears down both event
 * sources, then destroys the base engine object (reverse of create_). */
247 nvkm_fifo_destroy(struct nvkm_fifo *priv)
249 kfree(priv->channel);
250 nvkm_event_fini(&priv->uevent);
251 nvkm_event_fini(&priv->cevent);
252 nvkm_engine_destroy(&priv->base);
256 nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
257 struct nvkm_oclass *oclass,
258 int min, int max, int length, void **pobject)
260 struct nvkm_fifo *priv;
263 ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
264 "fifo", length, pobject);
271 priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
275 ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
279 priv->chid = nvkm_fifo_chid;
280 spin_lock_init(&priv->lock);