/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <core/firmware.h>
#include <subdev/acr.h>

#include <nvfw/flcn.h>

#include <nvif/class.h>

int
gm200_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	nvkm_warn(&gr->base.engine.subdev, "firmware unavailable\n");
	return -ENODEV;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

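/* Patch an LS bootloader descriptor already written to the WPR image:
 * re-read it, offset the code/data DMA base addresses by 'adjust', and
 * write it back (presumably to account for the WPR image moving).
 */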
static void
gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v1 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
}

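/* Write the initial bootloader DMEM descriptor for an LS falcon firmware,
 * pointing the bootloader at the resident code/data segments of the image
 * placed in the WPR region.
 */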
static void
gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
		       struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = base + lsfw->app_resident_code_offset;
	const u64 data = base + lsfw->app_resident_data_offset;
	const struct flcn_bl_dmem_desc_v1 hdr = {
		.ctx_dma = FALCON_DMAIDX_UCODE,
		.code_dma_base = code,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = data,
		.data_size = lsfw->app_resident_data_size,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

const struct nvkm_acr_lsf_func
gm200_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

const struct nvkm_acr_lsf_func
gm200_gr_fecs_acr = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

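/* ROP count, taken from the same register gm200_gr_init_rop_active_fbps()
 * reads as the active FBP count.
 */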
int
gm200_gr_rops(struct gf100_gr *gr)
{
	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

void
gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
}

void
gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
}

void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

static void
gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}

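/* Hardcoded GPC tile mappings for the GPC/TPC configurations handled
 * specially in gm200_gr_oneinit_tiles() below.
 */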
static u8
gm200_gr_tile_map_6_24[] = {
	0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
};

static u8
gm200_gr_tile_map_4_16[] = {
	0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
};

static u8
gm200_gr_tile_map_2_8[] = {
	0, 1, 1, 0, 0, 1, 1, 0,
};

void
gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	/*XXX: There's a different algorithm here I've not yet figured out. */
	gf100_gr_oneinit_sm_id(gr);
}

void
gm200_gr_oneinit_tiles(struct gf100_gr *gr)
{
	/*XXX: Not sure what this is about.  The algorithm from NVGPU
	 *     seems to work for all boards I tried from earlier (and
	 *     later) GPUs except in these specific configurations.
	 *
	 *     Let's just hardcode them for now.
	 */
	if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
		memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
		gr->screen_tile_row_offset = 1;
	} else
	if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
		memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
		gr->screen_tile_row_offset = 4;
	} else
	if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
		memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
		gr->screen_tile_row_offset = 5;
	} else {
		gf100_gr_oneinit_tiles(gr);
	}
}

static const struct gf100_gr_func
gm200_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_bios = gm107_gr_init_bios,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = gf117_gr_init_zcull,
	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_419cc0 = gf100_gr_init_419cc0,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
	.init_504430 = gm107_gr_init_504430,
	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
	.init_400054 = gm107_gr_init_400054,
	.trap_mp = gf100_gr_trap_mp,
	.rops = gm200_gr_rops,
	.tpc_nr = 4,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

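/* Load the FECS and GPCCS LS firmware (the *_bl, *_inst, *_data and *_sig
 * images) for ACR bootstrap, then the gk20a-style context software bundles.
 */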
int
gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	int ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->fecs.falcon,
						  NVKM_ACR_LSF_FECS,
						  "gr/fecs_", ver, fwif->fecs);
	if (ret)
		return ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->gpccs.falcon,
						  NVKM_ACR_LSF_GPCCS,
						  "gr/gpccs_", ver,
						  fwif->gpccs);
	if (ret)
		return ret;

	gr->firmware = true;

	return gk20a_gr_load_sw(gr, "gr/", ver);
}

MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");

static const struct gf100_gr_fwif
gm200_gr_fwif[] = {
	{  0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};

int
gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
}