/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <core/client.h>
28 #include <core/gpuobj.h>
29 #include <core/notify.h>
31 #include <nvif/event.h>
32 #include <nvif/unpack.h>
35 nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
38 if (WARN_ON(!fifo->func->recover_chan))
40 spin_lock_irqsave(&fifo->lock, flags);
41 fifo->func->recover_chan(fifo, chid);
42 spin_unlock_irqrestore(&fifo->lock, flags);
46 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
48 return fifo->func->pause(fifo, flags);
52 nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
54 return fifo->func->start(fifo, flags);
58 nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
59 struct nvkm_fifo_chan **pchan)
61 struct nvkm_fifo_chan *chan = *pchan;
64 spin_unlock_irqrestore(&fifo->lock, flags);
68 struct nvkm_fifo_chan *
69 nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
71 struct nvkm_fifo_chan *chan;
72 list_for_each_entry(chan, &fifo->chan, head) {
73 if (chan->inst->addr == inst) {
74 list_del(&chan->head);
75 list_add(&chan->head, &fifo->chan);
82 struct nvkm_fifo_chan *
83 nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
85 struct nvkm_fifo_chan *chan;
87 spin_lock_irqsave(&fifo->lock, flags);
88 if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
92 spin_unlock_irqrestore(&fifo->lock, flags);
96 struct nvkm_fifo_chan *
97 nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
99 struct nvkm_fifo_chan *chan;
101 spin_lock_irqsave(&fifo->lock, flags);
102 list_for_each_entry(chan, &fifo->chan, head) {
103 if (chan->chid == chid) {
104 list_del(&chan->head);
105 list_add(&chan->head, &fifo->chan);
110 spin_unlock_irqrestore(&fifo->lock, flags);
115 nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
117 nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
121 nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
122 struct nvkm_notify *notify)
124 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
128 notify->index = chan->chid;
134 static const struct nvkm_event_func
135 nvkm_fifo_kevent_func = {
136 .ctor = nvkm_fifo_kevent_ctor,
140 nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
141 struct nvkm_notify *notify)
152 static const struct nvkm_event_func
153 nvkm_fifo_cevent_func = {
154 .ctor = nvkm_fifo_cevent_ctor,
158 nvkm_fifo_cevent(struct nvkm_fifo *fifo)
160 nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
164 nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
166 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
167 fifo->func->uevent_fini(fifo);
171 nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
173 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
174 fifo->func->uevent_init(fifo);
178 nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
179 struct nvkm_notify *notify)
182 struct nvif_notify_uevent_req none;
186 if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
187 notify->size = sizeof(struct nvif_notify_uevent_rep);
195 static const struct nvkm_event_func
196 nvkm_fifo_uevent_func = {
197 .ctor = nvkm_fifo_uevent_ctor,
198 .init = nvkm_fifo_uevent_init,
199 .fini = nvkm_fifo_uevent_fini,
203 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
205 struct nvif_notify_uevent_rep rep = {
207 nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
211 nvkm_fifo_class_new(struct nvkm_device *device,
212 const struct nvkm_oclass *oclass, void *data, u32 size,
213 struct nvkm_object **pobject)
215 const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
216 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
217 return sclass->ctor(fifo, oclass, data, size, pobject);
220 static const struct nvkm_device_oclass
222 .ctor = nvkm_fifo_class_new,
226 nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
227 const struct nvkm_device_oclass **class)
229 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
230 const struct nvkm_fifo_chan_oclass *sclass;
233 if (fifo->func->class_get) {
234 int ret = fifo->func->class_get(fifo, index, &sclass);
236 oclass->base = sclass->base;
237 oclass->engn = sclass;
238 *class = &nvkm_fifo_class;
244 while ((sclass = fifo->func->chan[c])) {
246 oclass->base = sclass->base;
247 oclass->engn = sclass;
248 *class = &nvkm_fifo_class;
257 nvkm_fifo_intr(struct nvkm_engine *engine)
259 struct nvkm_fifo *fifo = nvkm_fifo(engine);
260 fifo->func->intr(fifo);
264 nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
266 struct nvkm_fifo *fifo = nvkm_fifo(engine);
267 if (fifo->func->fini)
268 fifo->func->fini(fifo);
273 nvkm_fifo_oneinit(struct nvkm_engine *engine)
275 struct nvkm_fifo *fifo = nvkm_fifo(engine);
276 if (fifo->func->oneinit)
277 return fifo->func->oneinit(fifo);
282 nvkm_fifo_init(struct nvkm_engine *engine)
284 struct nvkm_fifo *fifo = nvkm_fifo(engine);
285 fifo->func->init(fifo);
290 nvkm_fifo_dtor(struct nvkm_engine *engine)
292 struct nvkm_fifo *fifo = nvkm_fifo(engine);
294 if (fifo->func->dtor)
295 data = fifo->func->dtor(fifo);
296 nvkm_event_fini(&fifo->kevent);
297 nvkm_event_fini(&fifo->cevent);
298 nvkm_event_fini(&fifo->uevent);
302 static const struct nvkm_engine_func
304 .dtor = nvkm_fifo_dtor,
305 .oneinit = nvkm_fifo_oneinit,
306 .init = nvkm_fifo_init,
307 .fini = nvkm_fifo_fini,
308 .intr = nvkm_fifo_intr,
309 .base.sclass = nvkm_fifo_class_get,
313 nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
314 int index, int nr, struct nvkm_fifo *fifo)
319 INIT_LIST_HEAD(&fifo->chan);
320 spin_lock_init(&fifo->lock);
322 if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
323 fifo->nr = NVKM_FIFO_CHID_NR;
326 bitmap_clear(fifo->mask, 0, fifo->nr);
328 ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
332 if (func->uevent_init) {
333 ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
339 ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
343 return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);