/linux-6.6.21/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/

  base.c
     32  struct nvkm_pmu *pmu = device->pmu;   in nvkm_pmu_fan_controlled() local
     37  if (pmu && pmu->func->code.size)   in nvkm_pmu_fan_controlled()
     48  nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)   in nvkm_pmu_pgob() argument
     50  if (pmu && pmu->func->pgob)   in nvkm_pmu_pgob()
     51  pmu->func->pgob(pmu, enable);   in nvkm_pmu_pgob()
     57  struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);   in nvkm_pmu_recv() local
     58  return pmu->func->recv(pmu);   in nvkm_pmu_recv()
     62  nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],   in nvkm_pmu_send() argument
     65  if (!pmu || !pmu->func->send)   in nvkm_pmu_send()
     67  return pmu->func->send(pmu, reply, process, message, data0, data1);   in nvkm_pmu_send()
    [all …]

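The base.c hits show nouveau's per-chip ops-table idiom: each entry point checks both the object pointer and the function-pointer slot before dispatching, so a chip that lacks a feature simply leaves the slot NULL. A minimal userspace sketch of that guarded dispatch follows; the names (pmu_func, chip_pgob) are illustrative stand-ins, not nouveau's real types.

    #include <stdio.h>

    /* Per-chip operation table; any slot may be NULL if unimplemented.
     * Simplified stand-in for nvkm_pmu_func, for illustration only. */
    struct pmu_func {
        void (*pgob)(void *pmu, int enable);
    };

    struct pmu {
        const struct pmu_func *func;
    };

    /* Guarded dispatch, like nvkm_pmu_pgob(): tolerate both a missing
     * device and a missing op without crashing. */
    static void pmu_pgob(struct pmu *pmu, int enable)
    {
        if (pmu && pmu->func->pgob)
            pmu->func->pgob(pmu, enable);
    }

    static void chip_pgob(void *pmu, int enable)
    {
        (void)pmu;
        printf("pgob(%d)\n", enable);
    }

    static const struct pmu_func chip_func = { .pgob = chip_pgob };

    int main(void)
    {
        struct pmu pmu = { .func = &chip_func };
        pmu_pgob(&pmu, 1);   /* dispatches to chip_pgob() */
        pmu_pgob(NULL, 1);   /* silently ignored, as in the kernel code */
        return 0;
    }
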
  gt215.c
     30  gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],   in gt215_pmu_send() argument
     33  struct nvkm_subdev *subdev = &pmu->subdev;   in gt215_pmu_send()
     37  mutex_lock(&pmu->send.mutex);   in gt215_pmu_send()
     45  mutex_unlock(&pmu->send.mutex);   in gt215_pmu_send()
     54  pmu->recv.message = message;   in gt215_pmu_send()
     55  pmu->recv.process = process;   in gt215_pmu_send()
     65  pmu->send.base));   in gt215_pmu_send()
     77  wait_event(pmu->recv.wait, (pmu->recv.process == 0));   in gt215_pmu_send()
     78  reply[0] = pmu->recv.data[0];   in gt215_pmu_send()
     79  reply[1] = pmu->recv.data[1];   in gt215_pmu_send()
    [all …]

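gt215_pmu_send() is a one-request-at-a-time mailbox: a mutex serializes senders, the expected reply is recorded under the lock, and wait_event() parks the caller until the receive path clears recv.process. A rough userspace analogue, assuming a pthread condition variable in place of the kernel wait queue (all names here are illustrative, not nouveau's):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct chan {
        pthread_mutex_t send_mutex;   /* serializes senders, like pmu->send.mutex */
        pthread_mutex_t lock;         /* protects the reply state below */
        pthread_cond_t  wait;         /* stands in for the recv.wait wait queue */
        unsigned process;             /* nonzero while a reply is outstanding */
        unsigned data[2];
    };

    static struct chan ch = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_COND_INITIALIZER, 0, { 0, 0 }
    };

    static void chan_send(struct chan *c, unsigned reply[2], unsigned process)
    {
        pthread_mutex_lock(&c->send_mutex);     /* one request in flight */
        pthread_mutex_lock(&c->lock);
        c->process = process;                   /* record the expected reply */
        /* ...kick the peer/firmware here... */
        while (c->process != 0)                 /* wait_event(recv.wait, process == 0) */
            pthread_cond_wait(&c->wait, &c->lock);
        reply[0] = c->data[0];
        reply[1] = c->data[1];
        pthread_mutex_unlock(&c->lock);
        pthread_mutex_unlock(&c->send_mutex);
    }

    /* Receive path; in the driver this runs from an IRQ/work handler. */
    static void *peer(void *arg)
    {
        (void)arg;
        usleep(10000);
        pthread_mutex_lock(&ch.lock);
        ch.data[0] = 0xcafe;
        ch.data[1] = 0xf00d;
        ch.process = 0;                         /* reply delivered */
        pthread_cond_broadcast(&ch.wait);
        pthread_mutex_unlock(&ch.lock);
        return NULL;
    }

    int main(void)
    {
        unsigned reply[2];
        pthread_t t;

        pthread_create(&t, NULL, peer, NULL);
        chan_send(&ch, reply, 1);
        printf("reply: %#x %#x\n", reply[0], reply[1]);
        pthread_join(&t, NULL);
        return 0;
    }
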
  gk20a.c
     51  gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)   in gk20a_pmu_dvfs_target() argument
     53  struct nvkm_clk *clk = pmu->base.subdev.device->clk;   in gk20a_pmu_dvfs_target()
     59  gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)   in gk20a_pmu_dvfs_get_cur_state() argument
     61  struct nvkm_clk *clk = pmu->base.subdev.device->clk;   in gk20a_pmu_dvfs_get_cur_state()
     67  gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,   in gk20a_pmu_dvfs_get_target_state() argument
     70  struct gk20a_pmu_dvfs_data *data = pmu->data;   in gk20a_pmu_dvfs_get_target_state()
     71  struct nvkm_clk *clk = pmu->base.subdev.device->clk;   in gk20a_pmu_dvfs_get_target_state()
     86  nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",   in gk20a_pmu_dvfs_get_target_state()
     95  gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,   in gk20a_pmu_dvfs_get_dev_status() argument
     98  struct nvkm_falcon *falcon = &pmu->base.falcon;   in gk20a_pmu_dvfs_get_dev_status()
    [all …]

  gm20b.c
     42  struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);   in gm20b_pmu_acr_bootstrap_falcon() local
     52  ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,   in gm20b_pmu_acr_bootstrap_falcon()
     54  &pmu->subdev, msecs_to_jiffies(1000));   in gm20b_pmu_acr_bootstrap_falcon()
    129  struct nvkm_pmu *pmu = priv;   in gm20b_pmu_acr_init_wpr_callback() local
    130  struct nvkm_subdev *subdev = &pmu->subdev;   in gm20b_pmu_acr_init_wpr_callback()
    139  complete_all(&pmu->wpr_ready);   in gm20b_pmu_acr_init_wpr_callback()
    144  gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)   in gm20b_pmu_acr_init_wpr() argument
    154  return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,   in gm20b_pmu_acr_init_wpr()
    155  gm20b_pmu_acr_init_wpr_callback, pmu, 0);   in gm20b_pmu_acr_init_wpr()
    159  gm20b_pmu_initmsg(struct nvkm_pmu *pmu)   in gm20b_pmu_initmsg() argument
    [all …]

/linux-6.6.21/tools/perf/util/

  pmus.c
     66  struct perf_pmu *pmu, *tmp;   in perf_pmus__destroy() local
     68  list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {   in perf_pmus__destroy()
     69  list_del(&pmu->list);   in perf_pmus__destroy()
     71  perf_pmu__delete(pmu);   in perf_pmus__destroy()
     73  list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {   in perf_pmus__destroy()
     74  list_del(&pmu->list);   in perf_pmus__destroy()
     76  perf_pmu__delete(pmu);   in perf_pmus__destroy()
     84  struct perf_pmu *pmu;   in pmu_find() local
     86  list_for_each_entry(pmu, &core_pmus, list) {   in pmu_find()
     87  if (!strcmp(pmu->name, name) ||   in pmu_find()
    [all …]

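perf_pmus__destroy() uses list_for_each_entry_safe() because every node is unlinked and freed mid-walk, so the iterator must cache the next pointer before the current entry dies. A self-contained sketch of the same pattern against a minimal hand-rolled intrusive list (simplified from the kernel's <linux/list.h>; not perf's actual code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* Minimal intrusive doubly-linked list in the kernel's style. */
    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    struct pmu {
        char name[16];
        struct list_head list;
    };

    int main(void)
    {
        struct list_head pmus = LIST_HEAD_INIT(pmus);

        for (int i = 0; i < 3; i++) {
            struct pmu *p = malloc(sizeof(*p));
            snprintf(p->name, sizeof(p->name), "pmu%d", i);
            list_add_tail(&p->list, &pmus);
        }

        /* The "_safe" part: tmp caches ->next before the current node is
         * freed, which is exactly why perf_pmus__destroy() cannot use the
         * plain list_for_each_entry(). */
        for (struct list_head *pos = pmus.next, *tmp = pos->next;
             pos != &pmus; pos = tmp, tmp = pos->next) {
            struct pmu *p = container_of(pos, struct pmu, list);
            list_del(&p->list);
            printf("deleting %s\n", p->name);
            free(p);
        }
        return 0;
    }
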
  pmu.c
    115  static int pmu_aliases_parse(struct perf_pmu *pmu);
    158  static void perf_pmu_format__load(struct perf_pmu *pmu, struct perf_pmu_format *format)   in perf_pmu_format__load() argument
    166  if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, "format"))   in perf_pmu_format__load()
    184  int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)   in perf_pmu__format_parse() argument
    201  format = perf_pmu__new_format(&pmu->format, name);   in perf_pmu__format_parse()
    234  static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name)   in pmu_format() argument
    243  if (perf_pmu__format_parse(pmu, fd, /*eager_load=*/false))   in pmu_format()
    286  static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *alias)   in perf_pmu__parse_scale() argument
    298  scnprintf(path + len, sizeof(path) - len, "%s/events/%s.scale", pmu->name, alias->name);   in perf_pmu__parse_scale()
    322  static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *alias)   in perf_pmu__parse_unit() argument
    [all …]

  pmu.h
    177  const struct perf_pmu *pmu;   member
    192  void pmu_add_sys_aliases(struct perf_pmu *pmu);
    193  int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
    196  int perf_pmu__config_terms(struct perf_pmu *pmu,
    200  __u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
    201  int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
    202  int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
    204  int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback c…
    206  int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load);
    208  bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
    [all …]

/linux-6.6.21/drivers/gpu/drm/i915/

  i915_pmu.c
    142  static bool pmu_needs_timer(struct i915_pmu *pmu)   in pmu_needs_timer() argument
    144  struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);   in pmu_needs_timer()
    152  enable = pmu->enable;   in pmu_needs_timer()
    194  static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)   in read_sample() argument
    196  return pmu->sample[gt_id][sample].cur;   in read_sample()
    200  store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)   in store_sample() argument
    202  pmu->sample[gt_id][sample].cur = val;   in store_sample()
    206  add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)   in add_sample_mult() argument
    208  pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);   in add_sample_mult()
    215  struct i915_pmu *pmu = &i915->pmu;   in get_rc6() local
    [all …]

/linux-6.6.21/arch/x86/kvm/vmx/

  pmu_intel.c
     71  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)   in reprogram_fixed_counters() argument
     74  u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;   in reprogram_fixed_counters()
     77  pmu->fixed_ctr_ctrl = data;   in reprogram_fixed_counters()
     78  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {   in reprogram_fixed_counters()
     85  pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);   in reprogram_fixed_counters()
     87  __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);   in reprogram_fixed_counters()
     92  static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)   in intel_pmc_idx_to_pmc() argument
     95  return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,   in intel_pmc_idx_to_pmc()
    100  return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);   in intel_pmc_idx_to_pmc()
    106  struct kvm_pmu *pmu = pmc_to_pmu(pmc);   in intel_hw_event_available() local
    [all …]

/linux-6.6.21/drivers/soc/dove/

  pmu.c
     50  struct pmu_data *pmu = rcdev_to_pmu(rc);   in pmu_reset_reset() local
     54  spin_lock_irqsave(&pmu->lock, flags);   in pmu_reset_reset()
     55  val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);   in pmu_reset_reset()
     56  writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);   in pmu_reset_reset()
     57  writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);   in pmu_reset_reset()
     58  spin_unlock_irqrestore(&pmu->lock, flags);   in pmu_reset_reset()
     65  struct pmu_data *pmu = rcdev_to_pmu(rc);   in pmu_reset_assert() local
     69  spin_lock_irqsave(&pmu->lock, flags);   in pmu_reset_assert()
     70  val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);   in pmu_reset_assert()
     71  writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);   in pmu_reset_assert()
    [all …]

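The dove reset hooks do a read-modify-write of one shared register (PMC_SW_RST) under pmu->lock: pmu_reset_reset() pulses the line by writing the bit cleared, then set again, inside the same critical section. A userspace sketch of that pattern, assuming a plain pthread spinlock where the driver needs spin_lock_irqsave() and a volatile variable where it has MMIO:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the memory-mapped PMC_SW_RST register; in the driver
     * this is pmu->pmc_base + PMC_SW_RST, accessed with readl_relaxed()
     * and writel_relaxed(). */
    static volatile uint32_t pmc_sw_rst = 0xffffffff;
    static pthread_spinlock_t pmu_lock;

    #define BIT(n) (1u << (n))

    /* Pulse one reset line low then high; the lock keeps the whole
     * read-modify-write atomic against other users of the register.
     * (Userspace has no IRQ state, so irqsave is omitted here.) */
    static void pmu_reset_pulse(int id)
    {
        uint32_t val;

        pthread_spin_lock(&pmu_lock);
        val = pmc_sw_rst;              /* readl_relaxed()            */
        pmc_sw_rst = val & ~BIT(id);   /* assert the reset line      */
        pmc_sw_rst = val |  BIT(id);   /* deassert it again          */
        pthread_spin_unlock(&pmu_lock);
    }

    int main(void)
    {
        pthread_spin_init(&pmu_lock, PTHREAD_PROCESS_PRIVATE);
        pmu_reset_pulse(3);
        printf("PMC_SW_RST = %#x\n", (unsigned)pmc_sw_rst);
        pthread_spin_destroy(&pmu_lock);
        return 0;
    }
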
/linux-6.6.21/drivers/perf/

  fsl_imx8_ddr_perf.c
     43  #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
     97  struct pmu pmu;   member
    114  struct ddr_pmu *pmu = dev_get_drvdata(dev);   in ddr_perf_identifier_show() local
    116  return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);   in ddr_perf_identifier_show()
    124  struct ddr_pmu *pmu = dev_get_drvdata(dev);   in ddr_perf_identifier_attr_visible() local
    126  if (!pmu->devtype_data->identifier)   in ddr_perf_identifier_attr_visible()
    150  static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)   in ddr_perf_filter_cap_get() argument
    152  u32 quirks = pmu->devtype_data->quirks;   in ddr_perf_filter_cap_get()
    171  struct ddr_pmu *pmu = dev_get_drvdata(dev);   in ddr_perf_filter_cap_show() local
    176  return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));   in ddr_perf_filter_cap_show()
    [all …]

  fsl_imx9_ddr_perf.c
     45  #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
     57  struct pmu pmu;   member
     84  struct ddr_pmu *pmu = dev_get_drvdata(dev);   in ddr_perf_identifier_show() local
     86  return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);   in ddr_perf_identifier_show()
    104  struct ddr_pmu *pmu = dev_get_drvdata(dev);   in ddr_perf_cpumask_show() local
    106  return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));   in ddr_perf_cpumask_show()
    271  static void ddr_perf_clear_counter(struct ddr_pmu *pmu, int counter)   in ddr_perf_clear_counter() argument
    274  writel(0, pmu->base + PMC(counter) + 0x4);   in ddr_perf_clear_counter()
    275  writel(0, pmu->base + PMC(counter));   in ddr_perf_clear_counter()
    277  writel(0, pmu->base + PMC(counter));   in ddr_perf_clear_counter()
    [all …]

  marvell_cn10k_ddr_pmu.c
    125  struct pmu pmu;   member
    135  #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
    233  struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);   in cn10k_ddr_perf_cpumask_show() local
    235  return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));   in cn10k_ddr_perf_cpumask_show()
    289  static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,   in cn10k_ddr_perf_alloc_counter() argument
    297  pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;   in cn10k_ddr_perf_alloc_counter()
    303  pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;   in cn10k_ddr_perf_alloc_counter()
    309  if (pmu->events[i] == NULL) {   in cn10k_ddr_perf_alloc_counter()
    310  pmu->events[i] = event;   in cn10k_ddr_perf_alloc_counter()
    318  static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)   in cn10k_ddr_perf_free_counter() argument
    [all …]

  arm_pmu_platform.c
     25  static int probe_current_pmu(struct arm_pmu *pmu,   in probe_current_pmu() argument
     37  ret = info->init(pmu);   in probe_current_pmu()
     45  static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)   in pmu_parse_percpu_irq() argument
     48  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;   in pmu_parse_percpu_irq()
     50  ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);   in pmu_parse_percpu_irq()
     54  for_each_cpu(cpu, &pmu->supported_cpus)   in pmu_parse_percpu_irq()
     95  static int pmu_parse_irqs(struct arm_pmu *pmu)   in pmu_parse_irqs() argument
     98  struct platform_device *pdev = pmu->plat_device;   in pmu_parse_irqs()
     99  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;   in pmu_parse_irqs()
    112  pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;   in pmu_parse_irqs()
    [all …]

  riscv_pmu_legacy.c
    110  static void pmu_legacy_init(struct riscv_pmu *pmu)   in pmu_legacy_init() argument
    114  pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |   in pmu_legacy_init()
    116  pmu->ctr_start = pmu_legacy_ctr_start;   in pmu_legacy_init()
    117  pmu->ctr_stop = NULL;   in pmu_legacy_init()
    118  pmu->event_map = pmu_legacy_event_map;   in pmu_legacy_init()
    119  pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;   in pmu_legacy_init()
    120  pmu->ctr_get_width = pmu_legacy_ctr_get_width;   in pmu_legacy_init()
    121  pmu->ctr_clear_idx = NULL;   in pmu_legacy_init()
    122  pmu->ctr_read = pmu_legacy_read_ctr;   in pmu_legacy_init()
    123  pmu->event_mapped = pmu_legacy_event_mapped;   in pmu_legacy_init()
    [all …]

/linux-6.6.21/drivers/pmdomain/starfive/

  jh71xx-pmu.c
     71  struct jh71xx_pmu *pmu;   member
     77  struct jh71xx_pmu *pmu = pmd->pmu;   in jh71xx_pmu_get_state() local
     82  *is_on = readl(pmu->base + JH71XX_PMU_CURR_POWER_MODE) & mask;   in jh71xx_pmu_get_state()
     89  struct jh71xx_pmu *pmu = pmd->pmu;   in jh71xx_pmu_set_state() local
    100  dev_dbg(pmu->dev, "unable to get current state for %s\n",   in jh71xx_pmu_set_state()
    106  dev_dbg(pmu->dev, "pm domain [%s] is already %sable status.\n",   in jh71xx_pmu_set_state()
    111  spin_lock_irqsave(&pmu->lock, flags);   in jh71xx_pmu_set_state()
    133  writel(mask, pmu->base + mode);   in jh71xx_pmu_set_state()
    143  writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE);   in jh71xx_pmu_set_state()
    144  writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE);   in jh71xx_pmu_set_state()
    [all …]

/linux-6.6.21/arch/x86/kvm/svm/

  pmu.c
     28  static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)   in amd_pmc_idx_to_pmc() argument
     30  unsigned int num_counters = pmu->nr_arch_gp_counters;   in amd_pmc_idx_to_pmc()
     35  return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];   in amd_pmc_idx_to_pmc()
     38  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,   in get_gp_pmc_amd() argument
     41  struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);   in get_gp_pmc_amd()
     73  return amd_pmc_idx_to_pmc(pmu, idx);   in get_gp_pmc_amd()
     83  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);   in amd_is_valid_rdpmc_ecx() local
     87  return idx < pmu->nr_arch_gp_counters;   in amd_is_valid_rdpmc_ecx()
     99  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);   in amd_msr_idx_to_pmc() local
    102  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);   in amd_msr_idx_to_pmc()
    [all …]

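amd_pmc_idx_to_pmc() pairs the architectural bounds check with array_index_nospec() so a mispredicted branch cannot index gp_counters[] out of bounds speculatively. A simplified userspace rendering of the kernel's generic array_index_mask_nospec() fallback from include/linux/nospec.h (it relies on arithmetic right shift of a negative value, as the kernel's version does):

    #include <stdio.h>

    /* Returns ~0UL when idx < size, 0 otherwise, computed without a
     * branch the CPU could mispredict. Simplified from the kernel's
     * generic array_index_mask_nospec(); assumes arithmetic right
     * shift of signed values (true on gcc/clang). */
    static unsigned long index_mask(unsigned long idx, unsigned long size)
    {
        return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
    }

    int main(void)
    {
        unsigned counters[6] = { 10, 11, 12, 13, 14, 15 };
        unsigned long nr = 6;

        for (unsigned long idx = 4; idx < 8; idx++) {
            if (idx >= nr) {    /* the architectural bounds check */
                puts("rejected");
                continue;
            }
            /* Clamp idx on the speculative path too before indexing. */
            unsigned long safe = idx & index_mask(idx, nr);
            printf("counter[%lu] = %u\n", safe, counters[safe]);
        }
        return 0;
    }
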
/linux-6.6.21/drivers/perf/amlogic/

  meson_ddr_pmu_core.c
     21  struct pmu pmu;   member
     35  #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
     38  static void dmc_pmu_enable(struct ddr_pmu *pmu)   in dmc_pmu_enable() argument
     40  if (!pmu->pmu_enabled)   in dmc_pmu_enable()
     41  pmu->info.hw_info->enable(&pmu->info);   in dmc_pmu_enable()
     43  pmu->pmu_enabled = true;   in dmc_pmu_enable()
     46  static void dmc_pmu_disable(struct ddr_pmu *pmu)   in dmc_pmu_disable() argument
     48  if (pmu->pmu_enabled)   in dmc_pmu_disable()
     49  pmu->info.hw_info->disable(&pmu->info);   in dmc_pmu_disable()
     51  pmu->pmu_enabled = false;   in dmc_pmu_disable()
    [all …]

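The to_ddr_pmu() macro here (and in the fsl and cn10k drivers above) is the standard container_of() pattern: the perf core only ever hands the driver its embedded struct pmu, and the driver recovers the enclosing wrapper by subtracting the member offset. A self-contained sketch, with the struct layouts simplified rather than copied from the meson driver:

    #include <stdio.h>
    #include <stddef.h>

    /* container_of(), simplified from the kernel's definition: map a
     * pointer to an embedded member back to its containing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Stand-in for the perf core's struct pmu. */
    struct pmu {
        const char *name;
    };

    /* Driver wrapper embedding the generic object, like meson's ddr_pmu. */
    struct ddr_pmu {
        struct pmu pmu;
        int pmu_enabled;
    };

    #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)

    /* The core calls back with only the embedded struct pmu; the driver
     * recovers its private state without any lookup table. */
    static void dmc_pmu_enable(struct pmu *p)
    {
        struct ddr_pmu *ddr = to_ddr_pmu(p);
        ddr->pmu_enabled = 1;
    }

    int main(void)
    {
        struct ddr_pmu ddr = { .pmu = { .name = "meson_ddr_bw" } };

        dmc_pmu_enable(&ddr.pmu);
        printf("%s enabled=%d\n", ddr.pmu.name, ddr.pmu_enabled);
        return 0;
    }
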
/linux-6.6.21/Documentation/devicetree/bindings/soc/samsung/

  exynos-pmu.yaml
      4  $id: http://devicetree.org/schemas/soc/samsung/exynos-pmu.yaml#
     18  - samsung,exynos3250-pmu
     19  - samsung,exynos4210-pmu
     20  - samsung,exynos4212-pmu
     21  - samsung,exynos4412-pmu
     22  - samsung,exynos5250-pmu
     23  - samsung,exynos5260-pmu
     24  - samsung,exynos5410-pmu
     25  - samsung,exynos5420-pmu
     26  - samsung,exynos5433-pmu
    [all …]

/linux-6.6.21/Documentation/devicetree/bindings/arm/

  pmu.yaml
      4  $id: http://devicetree.org/schemas/arm/pmu.yaml#
     22  - apm,potenza-pmu
     23  - apple,avalanche-pmu
     24  - apple,blizzard-pmu
     25  - apple,firestorm-pmu
     26  - apple,icestorm-pmu
     28  - arm,arm1136-pmu
     29  - arm,arm1176-pmu
     30  - arm,arm11mpcore-pmu
     31  - arm,cortex-a5-pmu
    [all …]

/linux-6.6.21/Documentation/devicetree/bindings/arm/rockchip/

  pmu.yaml
      4  $id: http://devicetree.org/schemas/arm/rockchip/pmu.yaml#
     22  - rockchip,px30-pmu
     23  - rockchip,rk3066-pmu
     24  - rockchip,rk3128-pmu
     25  - rockchip,rk3288-pmu
     26  - rockchip,rk3368-pmu
     27  - rockchip,rk3399-pmu
     28  - rockchip,rk3568-pmu
     29  - rockchip,rk3588-pmu
     30  - rockchip,rv1126-pmu
    [all …]

/linux-6.6.21/arch/x86/events/intel/

  uncore.h
     88  struct pmu *pmu; /* for custom pmu ops */   member
    122  struct pmu pmu;   member
    151  struct intel_uncore_pmu *pmu;   member
    220  return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);   in dev_to_uncore_pmu()
    260  if (offset < box->pmu->type->mmio_map_size)   in uncore_mmio_is_valid_offset()
    264  offset, box->pmu->type->name);   in uncore_mmio_is_valid_offset()
    272  return box->pmu->type->box_ctl +   in uncore_mmio_box_ctl()
    273  box->pmu->type->mmio_offset * box->pmu->pmu_idx;   in uncore_mmio_box_ctl()
    278  return box->pmu->type->box_ctl;   in uncore_pci_box_ctl()
    283  return box->pmu->type->fixed_ctl;   in uncore_pci_fixed_ctl()
    [all …]

/linux-6.6.21/arch/x86/events/

  rapl.c
    110  struct pmu *pmu;   member
    116  struct pmu pmu;   member
    210  static void rapl_start_hrtimer(struct rapl_pmu *pmu)   in rapl_start_hrtimer() argument
    212  hrtimer_start(&pmu->hrtimer, pmu->timer_interval,   in rapl_start_hrtimer()
    218  struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);   in rapl_hrtimer_handle() local
    222  if (!pmu->n_active)   in rapl_hrtimer_handle()
    225  raw_spin_lock_irqsave(&pmu->lock, flags);   in rapl_hrtimer_handle()
    227  list_for_each_entry(event, &pmu->active_list, active_entry)   in rapl_hrtimer_handle()
    230  raw_spin_unlock_irqrestore(&pmu->lock, flags);   in rapl_hrtimer_handle()
    232  hrtimer_forward_now(hrtimer, pmu->timer_interval);   in rapl_hrtimer_handle()
    [all …]

/linux-6.6.21/tools/perf/tests/

  pmu-events.c
     41  struct perf_pmu pmu;   member
     47  .pmu = "default_core",
     59  .pmu = "default_core",
     71  .pmu = "default_core",
     83  .pmu = "default_core",
     95  .pmu = "default_core",
    107  .pmu = "default_core",
    135  .pmu = "hisi_sccl,ddrc",
    149  .pmu = "uncore_cbox",
    163  .pmu = "uncore_cbox",
    [all …]

/linux-6.6.21/Documentation/devicetree/bindings/pinctrl/

  marvell,dove-pinctrl.txt
     14  Note: pmu* also allows for Power Management functions listed below
     18  mpp0   0   gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
     19  mpp1   1   gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
     20  mpp2   2   gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
     21             uart1(rts), pmu*
     22  mpp3   3   gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
     23             uart1(cts), lcd-spi(cs1), pmu*
     24  mpp4   4   gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
     25  mpp5   5   gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
     26  mpp6   6   gpio, pmu, uart3(txd), sdio1(buspwr), spi1(mosi), pmu*
    [all …]
