/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__

#ifndef NOUVEAU_DMA_DEBUG
#define NOUVEAU_DMA_DEBUG 0
#endif

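/*
 * Submits a segment of the pushbuf (byte offset 'delta', 'length' bytes);
 * used by FIRE_RING() below on channels that use an indirect buffer (IB)
 * ring, i.e. when chan->dma.ib_max is set.
 */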
void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
		   int delta, int length);

/*
 * There's a hw race condition where you can't jump to your PUT offset;
 * to avoid it we jump to offset + SKIPS and fill the difference with
 * NOPs.
 *
 * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
 * SKIPS value of 8.  Assuming the race condition has to do with writing
 * into the fetch area, and since we configure a fetch size of 128 bytes,
 * we need a correspondingly larger SKIPS value.
 */
#define NOUVEAU_DMA_SKIPS (128 / 4)
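/*
 * Illustrative sketch (not part of this header): at channel init the skip
 * area is expected to be filled with NOP dwords using the helpers declared
 * below, roughly like so:
 *
 *	int i, ret;
 *
 *	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 *		OUT_RING(chan, 0);
 */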

/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF = 0,
	NvSubSw = 1,
	NvSub2D = 2,
	NvSubCtxSurf2D = 2,
	NvSubGdiRect = 3,
	NvSubImageBlit = 4
};
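/*
 * Illustrative sketch: an object handle (see the enum below) is bound to a
 * subchannel by writing the handle to method 0x0000 of that subchannel,
 * using the helpers declared further down, e.g.:
 *
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
 *	OUT_RING(chan, NvM2MF);
 */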

/* Object handles. */
enum {
	NvM2MF = 0x80000001,
	NvDmaFB = 0x80000002,
	NvDmaTT = 0x80000003,
	NvNotify0 = 0x80000006,
	Nv2D = 0x80000007,
	NvCtxSurf2D = 0x80000008,
	NvRop = 0x80000009,
	NvImagePatt = 0x8000000a,
	NvClipRect = 0x8000000b,
	NvGdiRect = 0x8000000c,
	NvImageBlit = 0x8000000d,
	NvSw = 0x8000000e,
	NvSema = 0x8000000f,
	NvEvoSema0 = 0x80000010,
	NvEvoSema1 = 0x80000011,

	/* G80+ display objects */
	NvEvoVRAM = 0x01000000,
	NvEvoFB16 = 0x01000001,
	NvEvoFB32 = 0x01000002,
	NvEvoVRAM_LP = 0x01000003,
	NvEvoSync = 0xcafe0000
};

#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c

#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
	int ret;

	ret = nouveau_dma_wait(chan, 1, size);
	if (ret)
		return ret;

	chan->dma.free -= size;
	return 0;
}

static inline void
OUT_RING(struct nouveau_channel *chan, int data)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
			chan->id, chan->dma.cur << 2, data);
	}

	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
}

extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

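/*
 * Emit an NVC0-style method header: opcode, payload size, subchannel and
 * the dword-addressed method (hence mthd >> 2) packed into one dword.
 */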
static inline void
BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
{
	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
}

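/*
 * Emit a pre-NVC0 method header: payload size, subchannel and the
 * byte-addressed method packed into one dword.
 */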
static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
}

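/*
 * Update the hardware PUT pointer.  The barrier and the pushbuf read-back
 * are presumably there to ensure all pushbuf writes are visible to the GPU
 * before PUT is bumped.
 */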
#define WRITE_PUT(val) do { \
	DRM_MEMORYBARRIER(); \
	nouveau_bo_rd32(chan->pushbuf_bo, 0); \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
} while (0)

static inline void
FIRE_RING(struct nouveau_channel *chan)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
			chan->id, chan->dma.cur << 2);
	}

	if (chan->dma.cur == chan->dma.put)
		return;
	chan->accel_done = true;

	if (chan->dma.ib_max) {
		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
			      (chan->dma.cur - chan->dma.put) << 2);
	} else {
		WRITE_PUT(chan->dma.cur);
	}

	chan->dma.put = chan->dma.cur;
}
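/*
 * Illustrative sketch of the usual submission pattern with the helpers
 * above:
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret)
 *		return ret;
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 *	OUT_RING(chan, 0);
 *	FIRE_RING(chan);
 *
 * If an error occurs after some OUT_RING()s but before FIRE_RING(), the
 * unsubmitted dwords can be discarded with WIND_RING() below, which rewinds
 * the software cursor to the last submitted position.
 */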

static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}

#endif