/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <core/firmware.h>
#include <subdev/acr.h>
#include <subdev/secboot.h>

#include <nvfw/flcn.h>

#include <nvif/class.h>

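/* Fallback selected via the firmware-interface table when no signed firmware
 * could be located.  GM200 and newer need signed FECS/GPCCS ucode to bring up
 * GR, so all we can do is warn and fail.
 */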
int
gm200_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	nvkm_warn(&gr->base.engine.subdev, "firmware unavailable\n");
	return -ENODEV;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

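/* Patch a previously-written bootloader descriptor after ACR relocates the
 * LS image: re-read it from WPR, shift the code/data DMA bases by 'adjust',
 * and write it back.
 */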
static void
gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v1 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
}

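/* Build the falcon bootloader DMEM descriptor for an LS firmware image and
 * write it into WPR at 'bld'; code/data DMA bases are derived from the image
 * offset plus the application's resident code/data offsets.
 */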
static void
gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
		       struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = base + lsfw->app_resident_code_offset;
	const u64 data = base + lsfw->app_resident_data_offset;
	const struct flcn_bl_dmem_desc_v1 hdr = {
		.ctx_dma = FALCON_DMAIDX_UCODE,
		.code_dma_base = code,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = data,
		.data_size = lsfw->app_resident_data_size,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

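/* ACR LS-falcon descriptors for the GR falcons.  Both use the v1 bootloader
 * DMEM descriptor; GPCCS additionally requests NVKM_ACR_LSF_FORCE_PRIV_LOAD.
 */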
const struct nvkm_acr_lsf_func
gm200_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

const struct nvkm_acr_lsf_func
gm200_gr_fecs_acr = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

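/* ROP count, read from 0x12006c; gm200_gr_init_rop_active_fbps() below reads
 * the same register as the active FBP count.
 */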
int
gm200_gr_rops(struct gf100_gr *gr)
{
	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

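/* Enable hardware-warning (HWW) error reporting in the DS unit's second ESR
 * register pair (0x405848/0x40584c).
 */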
void
gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
}

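/* Mirror the active-LTC configuration from PFB (0x100800/0x100804) into the
 * GPC broadcast registers.
 */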
void
gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
}

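/* Seed the GPC broadcast MMU registers from the corresponding PFB MMU
 * configuration.
 */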
void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

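/* Program the number of active FBPs into the ZROP and CROP units. */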
static void
gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}

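/* Hardcoded screen-tile mappings for the GPC/TPC configurations the generic
 * gf100 algorithm doesn't produce the expected layout for; consumed by
 * gm200_gr_oneinit_tiles() below.
 */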
static u8
gm200_gr_tile_map_6_24[] = {
	0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
};

static u8
gm200_gr_tile_map_4_16[] = {
	0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
};

static u8
gm200_gr_tile_map_2_8[] = {
	0, 1, 1, 0, 0, 1, 1, 0,
};

void
gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	/*XXX: There's a different algorithm here I've not yet figured out. */
	gf100_gr_oneinit_sm_id(gr);
}

void
gm200_gr_oneinit_tiles(struct gf100_gr *gr)
{
	/*XXX: Not sure what this is about.  The algorithm from NVGPU
	 *     seems to work for all boards I tried from earlier (and
	 *     later) GPUs except in these specific configurations.
	 *
	 *     Let's just hardcode them for now.
	 */
	if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
		memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
		gr->screen_tile_row_offset = 1;
	} else
	if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
		memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
		gr->screen_tile_row_offset = 4;
	} else
	if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
		memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
		gr->screen_tile_row_offset = 5;
	} else {
		gf100_gr_oneinit_tiles(gr);
	}
}

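/* gf100_gr_func implementation shared by GM200/GM204/GM206; the firmware
 * lists below cover all three.
 */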
static const struct gf100_gr_func
gm200_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_bios = gm107_gr_init_bios,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = gf117_gr_init_zcull,
	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_419cc0 = gf100_gr_init_419cc0,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
	.init_504430 = gm107_gr_init_504430,
	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
	.init_400054 = gm107_gr_init_400054,
	.trap_mp = gf100_gr_trap_mp,
	.rops = gm200_gr_rops,
	.tpc_nr = 4,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

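/* Load the signed FECS and GPCCS LS firmware (bootloader, instruction, data
 * and signature images) for ACR, then the GR software bundles (sw_ctx,
 * sw_nonctx, sw_bundle_init, sw_method_init) via gk20a_gr_load_sw().
 */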
int
gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	int ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->fecs.falcon,
						  NVKM_ACR_LSF_FECS,
						  "gr/fecs_", ver, fwif->fecs);
	if (ret)
		return ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->gpccs.falcon,
						  NVKM_ACR_LSF_GPCCS,
						  "gr/gpccs_", ver,
						  fwif->gpccs);
	if (ret)
		return ret;

	gr->firmware = true;

	return gk20a_gr_load_sw(gr, "gr/", ver);
}

MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");

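/* Firmware interface table: prefer signed FECS/GPCCS firmware, falling back
 * to gm200_gr_nofw() (which fails) when none is available.
 */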
static const struct gf100_gr_fwif
gm200_gr_fwif[] = {
	{  0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};

int
gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
}