Lines matching refs: pmu

32 struct nvkm_pmu *pmu = device->pmu; in nvkm_pmu_fan_controlled() local
37 if (pmu && pmu->func->code.size) in nvkm_pmu_fan_controlled()
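These two matches are the guard at the top of nvkm_pmu_fan_controlled(). A minimal reconstruction of the whole function follows; the return values, comments, and the Fermi chipset cutoff are assumptions based on upstream nouveau, not on this listing:

/* Sketch, not verbatim source: only the two matched lines above are
 * confirmed by the listing. */
bool
nvkm_pmu_fan_controlled(struct nvkm_device *device)
{
	struct nvkm_pmu *pmu = device->pmu;

	/* Internally-loaded PMU firmware does not drive the fans, so
	 * software fan control is possible. */
	if (pmu && pmu->func->code.size)
		return false;

	/* Default PMU firmware on Fermi (0xc0) and newer controls the
	 * fan itself, which would conflict with software control. */
	return device->chipset >= 0xc0;
}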
48 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) in nvkm_pmu_pgob() argument
50 if (pmu && pmu->func->pgob) in nvkm_pmu_pgob()
51 pmu->func->pgob(pmu, enable); in nvkm_pmu_pgob()
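Lines 48-51 are effectively the entire function: a NULL-safe dispatch through the per-chipset function table, safe to call even on devices without a PMU:

void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	/* No-op unless this device has a PMU with a PGOB hook. */
	if (pmu && pmu->func->pgob)
		pmu->func->pgob(pmu, enable);
}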
57 struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); in nvkm_pmu_recv() local
58 return pmu->func->recv(pmu); in nvkm_pmu_recv()
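The work handler recovers its nvkm_pmu from the embedded work_struct via container_of(), the standard kernel idiom for deferred work on an embedded member. The static void prototype below is the usual workqueue handler signature, assumed rather than shown by the listing:

static void
nvkm_pmu_recv(struct work_struct *work)
{
	/* Walk back from the recv.work member to the containing pmu. */
	struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);

	/* recv() returns void; returning a void expression is a GNU C
	 * extension the kernel build permits. */
	return pmu->func->recv(pmu);
}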
62 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], in nvkm_pmu_send() argument
65 if (!pmu || !pmu->func->send) in nvkm_pmu_send()
67 return pmu->func->send(pmu, reply, process, message, data0, data1); in nvkm_pmu_send()
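nvkm_pmu_send() has the same guarded-dispatch shape, but reports an error when no backend is available. A sketch; the exact errno on the missing-backend path is an assumption (the listing shows only the condition):

int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	if (!pmu || !pmu->func->send)
		return -ENODEV;	/* assumed errno; not in the listing */

	return pmu->func->send(pmu, reply, process, message, data0, data1);
}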
73 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_intr() local
74 if (!pmu->func->intr) in nvkm_pmu_intr()
76 pmu->func->intr(pmu); in nvkm_pmu_intr()
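The interrupt hook uses the early-return variant of the same pattern; line 75 (a bare return) does not reference pmu and is therefore absent from the match list:

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);

	if (!pmu->func->intr)
		return;

	pmu->func->intr(pmu);
}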
82 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_fini() local
84 if (pmu->func->fini) in nvkm_pmu_fini()
85 pmu->func->fini(pmu); in nvkm_pmu_fini()
87 flush_work(&pmu->recv.work); in nvkm_pmu_fini()
89 reinit_completion(&pmu->wpr_ready); in nvkm_pmu_fini()
91 nvkm_falcon_cmdq_fini(pmu->lpq); in nvkm_pmu_fini()
92 nvkm_falcon_cmdq_fini(pmu->hpq); in nvkm_pmu_fini()
93 pmu->initmsg_received = false; in nvkm_pmu_fini()
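Fini is the teardown half of the runtime state: stop the backend, drain the message-handling work, re-arm the WPR-ready completion for the next init, and shut down both command queues. The bool suspend parameter and the return 0 below follow the standard nvkm_subdev_func.fini signature and are assumptions, not matches:

static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);

	if (pmu->func->fini)
		pmu->func->fini(pmu);

	/* No message work may remain in flight past this point. */
	flush_work(&pmu->recv.work);

	/* Arm the completion so the next init can wait on WPR setup. */
	reinit_completion(&pmu->wpr_ready);

	nvkm_falcon_cmdq_fini(pmu->lpq);
	nvkm_falcon_cmdq_fini(pmu->hpq);
	pmu->initmsg_received = false;
	return 0;
}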
98 nvkm_pmu_reset(struct nvkm_pmu *pmu) in nvkm_pmu_reset() argument
100 struct nvkm_device *device = pmu->subdev.device; in nvkm_pmu_reset()
102 if (!pmu->func->enabled(pmu)) in nvkm_pmu_reset()
106 if (pmu->func->reset) in nvkm_pmu_reset()
107 pmu->func->reset(pmu); in nvkm_pmu_reset()
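Reset bails out early when the falcon is already disabled; that early return sits in the unmatched gap between lines 102 and 106. The device local at line 100 implies the function also touches registers after the reset call (upstream polls for IMEM/DMEM scrubbing to finish); that wait is marked but omitted in this sketch:

static int
nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;

	/* Nothing to reset if the falcon is already halted. */
	if (!pmu->func->enabled(pmu))
		return 0;

	if (pmu->func->reset)
		pmu->func->reset(pmu);

	/* Assumed: a poll on `device` for scrub completion goes here. */
	return 0;
}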
119 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_preinit() local
120 nvkm_pmu_reset(pmu); in nvkm_pmu_preinit()
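Preinit is a thin wrapper around nvkm_pmu_reset(): it knocks down whatever firmware the VBIOS or a previous driver left running before anything else depends on the PMU.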
127 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_init() local
128 struct nvkm_device *device = pmu->subdev.device; in nvkm_pmu_init()
130 if (!pmu->func->init) in nvkm_pmu_init()
133 if (pmu->func->enabled(pmu)) { in nvkm_pmu_init()
141 nvkm_pmu_reset(pmu); in nvkm_pmu_init()
144 return pmu->func->init(pmu); in nvkm_pmu_init()
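Init only proceeds when the backend provides an init hook. If the falcon is still running, the unmatched lines 134-140 (they reference only device, not pmu) mask interrupts and wait for idle before the reset; the sketch marks that gap with a comment:

static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;

	if (!pmu->func->init)
		return 0;

	if (pmu->func->enabled(pmu)) {
		/* Assumed from the gap: inhibit PMU interrupts via
		 * `device` and wait for the falcon to idle, then reset. */
		nvkm_pmu_reset(pmu);
	}

	return pmu->func->init(pmu);
}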
150 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_dtor() local
151 nvkm_falcon_msgq_del(&pmu->msgq); in nvkm_pmu_dtor()
152 nvkm_falcon_cmdq_del(&pmu->lpq); in nvkm_pmu_dtor()
153 nvkm_falcon_cmdq_del(&pmu->hpq); in nvkm_pmu_dtor()
154 nvkm_falcon_qmgr_del(&pmu->qmgr); in nvkm_pmu_dtor()
155 nvkm_falcon_dtor(&pmu->falcon); in nvkm_pmu_dtor()
156 mutex_destroy(&pmu->send.mutex); in nvkm_pmu_dtor()
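The destructor releases everything nvkm_pmu_ctor() created, in reverse order of construction, and finishes with the send mutex. The return value is an assumption (nvkm subdev destructors conventionally hand the object back for the core to kfree()):

static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);

	/* Reverse of construction: msgq, cmdqs, qmgr, then the falcon. */
	nvkm_falcon_msgq_del(&pmu->msgq);
	nvkm_falcon_cmdq_del(&pmu->lpq);
	nvkm_falcon_cmdq_del(&pmu->hpq);
	nvkm_falcon_qmgr_del(&pmu->qmgr);
	nvkm_falcon_dtor(&pmu->falcon);
	mutex_destroy(&pmu->send.mutex);

	return pmu;	/* assumed: object returned for the core to free */
}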
171 enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu) in nvkm_pmu_ctor() argument
175 nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev); in nvkm_pmu_ctor()
177 mutex_init(&pmu->send.mutex); in nvkm_pmu_ctor()
179 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); in nvkm_pmu_ctor()
180 init_waitqueue_head(&pmu->recv.wait); in nvkm_pmu_ctor()
182 fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu); in nvkm_pmu_ctor()
186 pmu->func = fwif->func; in nvkm_pmu_ctor()
188 ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name, in nvkm_pmu_ctor()
189 0x10a000, &pmu->falcon); in nvkm_pmu_ctor()
193 if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) || in nvkm_pmu_ctor()
194 (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) || in nvkm_pmu_ctor()
195 (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) || in nvkm_pmu_ctor()
196 (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq))) in nvkm_pmu_ctor()
199 init_completion(&pmu->wpr_ready); in nvkm_pmu_ctor()
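The constructor wires the base subdev, the locking and work-queue state, the firmware-selected function table, the falcon at PRI base 0x10a000 (line 189), and the queue plumbing. A sketch assembled from the matches; the error handling after nvkm_firmware_load() (the unmatched lines 183-185) is assumed to be the usual IS_ERR/PTR_ERR check:

int
nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu)
{
	int ret;

	/* nvkm_pmu here is the subdev function table, per line 175. */
	nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev);

	mutex_init(&pmu->send.mutex);
	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);

	/* Pick the firmware variant; error check assumed, not matched. */
	fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
	if (IS_ERR(fwif))
		return PTR_ERR(fwif);
	pmu->func = fwif->func;

	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name,
			       0x10a000, &pmu->falcon);
	if (ret)
		return ret;

	/* One queue manager, high- and low-priority command queues, and
	 * a message queue; the chained assignments stop at the first
	 * failure. */
	if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
	    (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
		return ret;

	init_completion(&pmu->wpr_ready);
	return 0;
}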
207 struct nvkm_pmu *pmu; in nvkm_pmu_new_() local
208 if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) in nvkm_pmu_new_()
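The allocator assigns both the local and the caller's pointer in one expression, so the caller holds the object even if a later setup step fails and the subdev core must destroy it. The signature and the trailing ctor call are inferred from nvkm_pmu_ctor() above:

int
nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu)
{
	struct nvkm_pmu *pmu;

	/* Allocate zeroed state and publish it via *ppmu immediately. */
	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;

	return nvkm_pmu_ctor(fwif, device, type, inst, pmu);
}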