/linux-6.1.9/drivers/net/wireless/ath/ath9k/
D | channel.c
  in ath_set_channel():
     23  static int ath_set_channel(struct ath_softc *sc)
     25          struct ath_hw *ah = sc->sc_ah;
     27          struct ieee80211_hw *hw = sc->hw;
     29          struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
     47          ath_update_survey_stats(sc);
     57          if (!sc->cur_chan->offchannel && sc->cur_survey != &sc->survey[pos]) {
     58                  if (sc->cur_survey)
     59                          sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
     61                  sc->cur_survey = &sc->survey[pos];
     63                  memset(sc->cur_survey, 0, sizeof(struct survey_info));
  [all …]
D | main.c
  in ath9k_has_pending_frames():
     60  static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
     78          acq = &sc->cur_chan->acq[txq->mac80211_qnum];
  in ath9k_setpower():
     87  static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
     92          spin_lock_irqsave(&sc->sc_pm_lock, flags);
     93          ret = ath9k_hw_setpower(sc->sc_ah, mode);
     94          spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  in ath_ps_full_sleep():
    101          struct ath_softc *sc = from_timer(sc, t, sleep_timer);
    102          struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    110          ath9k_hw_setrxabort(sc->sc_ah, 1);
    111          ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
  [all …]
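The ath_ps_full_sleep() hit above shows the timer-callback idiom where from_timer() recovers the driver context from the timer_list embedded in it. A minimal sketch of that idiom, under invented names (my_softc, my_sleep_timeout, my_start_sleep_timer), not the ath9k code itself:

/*
 * Illustrative sketch only: a timer callback recovering the private struct
 * that embeds its timer_list, the way from_timer() is used above.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_softc {
        struct timer_list sleep_timer;
        /* ... driver state ... */
};

static void my_sleep_timeout(struct timer_list *t)
{
        /* from_timer() is container_of() specialised for timer_list members */
        struct my_softc *sc = from_timer(sc, t, sleep_timer);

        /* put the hardware to sleep using sc->... */
}

static void my_start_sleep_timer(struct my_softc *sc)
{
        timer_setup(&sc->sleep_timer, my_sleep_timeout, 0);
        mod_timer(&sc->sleep_timer, jiffies + msecs_to_jiffies(100));
}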
D | recv.c
  in ath9k_check_auto_sleep():
     23  static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
     25          return sc->ps_enabled &&
     26                 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
  in ath_rx_buf_link():
     37  static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
     40          struct ath_hw *ah = sc->sc_ah;
     63          if (sc->rx.rxlink)
     64                  *sc->rx.rxlink = bf->bf_daddr;
     68          sc->rx.rxlink = &ds->ds_link;
  in ath_rx_buf_relink():
     71  static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
     74          if (sc->rx.buf_hold)
  [all …]
D | beacon.c
  in ath9k_reset_beacon_status():
     22  static void ath9k_reset_beacon_status(struct ath_softc *sc)
     24          sc->beacon.tx_processed = false;
     25          sc->beacon.tx_last = false;
  in ath9k_beaconq_config():
     33  static void ath9k_beaconq_config(struct ath_softc *sc)
     35          struct ath_hw *ah = sc->sc_ah;
     40          ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
     42          if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
     43              sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
     50                  txq = sc->tx.txq_map[IEEE80211_AC_BE];
     60          if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
  [all …]
D | gpio.c
  in ath_fill_led_pin():
     25  static void ath_fill_led_pin(struct ath_softc *sc)
     27          struct ath_hw *ah = sc->sc_ah;
  in ath_led_brightness():
     54          struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
     57          if (sc->sc_ah->config.led_active_high)
     60          ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, val);
  in ath_deinit_leds():
     63  void ath_deinit_leds(struct ath_softc *sc)
     65          if (!sc->led_registered)
     68          ath_led_brightness(&sc->led_cdev, LED_OFF);
     69          led_classdev_unregister(&sc->led_cdev);
     71          ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
  [all …]
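ath_led_brightness() uses container_of() to get from the led_classdev handed in by the LED core back to the softc that embeds it. A hedged sketch of the same pattern with invented names (my_softc, my_led_brightness); the polarity handling below is illustrative, and the real driver ends by calling ath9k_hw_set_gpio() on the LED pin:

/* Hypothetical sketch of the container_of() pattern used above. */
#include <linux/leds.h>
#include <linux/container_of.h>

struct my_softc {
        struct led_classdev led_cdev;
        bool led_active_high;
};

static void my_led_brightness(struct led_classdev *led_cdev,
                              enum led_brightness brightness)
{
        struct my_softc *sc = container_of(led_cdev, struct my_softc, led_cdev);
        int val = (brightness == LED_OFF) ? 0 : 1;

        if (!sc->led_active_high)
                val = !val;

        /* drive the LED GPIO with val here */
}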
D | tx99.c
  in ath9k_tx99_stop():
     19  static void ath9k_tx99_stop(struct ath_softc *sc)
     21          struct ath_hw *ah = sc->sc_ah;
     24          ath_drain_all_txq(sc);
     25          ath_startrecv(sc);
     30          ieee80211_wake_queues(sc->hw);
     32          kfree_skb(sc->tx99_skb);
     33          sc->tx99_skb = NULL;
     34          sc->tx99_state = false;
     36          ath9k_hw_tx99_stop(sc->sc_ah);
  in ath9k_build_tx99_skb():
     40  static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
  [all …]
D | init.c
  file scope:
    150  static void ath9k_deinit_softc(struct ath_softc *sc);
  in ath9k_iowrite32():
    177          struct ath_softc *sc = (struct ath_softc *) common->priv;
    181                  spin_lock_irqsave(&sc->sc_serial_rw, flags);
    182                  iowrite32(val, sc->mem + reg_offset);
    183                  spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
    185                  iowrite32(val, sc->mem + reg_offset);
  in ath9k_ioread32():
    192          struct ath_softc *sc = (struct ath_softc *) common->priv;
    197                  spin_lock_irqsave(&sc->sc_serial_rw, flags);
    198                  val = ioread32(sc->mem + reg_offset);
    199                  spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
  [all …]
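The ath9k_iowrite32()/ath9k_ioread32() hits show the serialised register access pattern: when 32-bit MMIO access cannot be assumed atomic, every read and write takes the same spinlock. A sketch of that pattern under invented names (my_softc, my_iowrite32, my_ioread32), not the driver's accessors themselves:

/* Sketch of spinlock-serialised MMIO accessors, with invented names. */
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_softc {
        void __iomem *mem;
        spinlock_t sc_serial_rw;
};

static void my_iowrite32(struct my_softc *sc, u32 val, u32 reg_offset)
{
        unsigned long flags;

        spin_lock_irqsave(&sc->sc_serial_rw, flags);
        iowrite32(val, sc->mem + reg_offset);
        spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
}

static u32 my_ioread32(struct my_softc *sc, u32 reg_offset)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&sc->sc_serial_rw, flags);
        val = ioread32(sc->mem + reg_offset);
        spin_unlock_irqrestore(&sc->sc_serial_rw, flags);

        return val;
}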
/linux-6.1.9/fs/xfs/scrub/
D | agheader.c
  in xchk_superblock_xref():
     26          struct xfs_scrub *sc,
     29          struct xfs_mount *mp = sc->mp;
     30          xfs_agnumber_t agno = sc->sm->sm_agno;
     34          if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
     39          error = xchk_ag_init_existing(sc, agno, &sc->sa);
     40          if (!xchk_xref_process_error(sc, agno, agbno, &error))
     43          xchk_xref_is_used_space(sc, agbno, 1);
     44          xchk_xref_is_not_inode_chunk(sc, agbno, 1);
     45          xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
     46          xchk_xref_is_not_shared(sc, agbno, 1);
  [all …]
D | common.c
  in __xchk_process_error():
     67          struct xfs_scrub *sc,
     80                          sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
     81                          sc->sm, *error);
     86                  sc->sm->sm_flags |= errflag;
     90                  trace_xchk_op_error(sc, agno, bno, *error,
  in xchk_process_error():
     99          struct xfs_scrub *sc,
    104          return __xchk_process_error(sc, agno, bno, error,
  in xchk_xref_process_error():
    110          struct xfs_scrub *sc,
    115          return __xchk_process_error(sc, agno, bno, error,
  in __xchk_fblock_process_error():
    122          struct xfs_scrub *sc,
  [all …]
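A rough sketch, with invented names (my_scrub, my_process_error), of the error-classification shape these helpers follow: corruption-type errors get recorded in the scrub flags and swallowed, while other errors are left for the caller to treat as operational failures. The real XFS helper adds tracing and retry handling not shown here.

/* Not the XFS code: a simplified error filter of the same general shape. */
#include <linux/errno.h>
#include <linux/fs.h>           /* EFSCORRUPTED, EFSBADCRC */
#include <linux/types.h>

struct my_scrub {
        __u32 sm_flags;         /* outcome flags reported to userspace */
};

static bool my_process_error(struct my_scrub *sc, int *error, __u32 errflag)
{
        switch (*error) {
        case 0:
                return true;                    /* success, keep scrubbing */
        case -EFSBADCRC:
        case -EFSCORRUPTED:
                sc->sm_flags |= errflag;        /* remember the corruption */
                *error = 0;                     /* ...but do not fail the scrub */
                return false;                   /* abandon this particular check */
        default:
                return false;                   /* genuine operational error */
        }
}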
D | inode.c
  in xchk_setup_inode():
     31          struct xfs_scrub *sc)
     39          error = xchk_get_inode(sc);
     45                  return xchk_trans_alloc(sc, 0);
     51          sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
     52          xfs_ilock(sc->ip, sc->ilock_flags);
     53          error = xchk_trans_alloc(sc, 0);
     56          sc->ilock_flags |= XFS_ILOCK_EXCL;
     57          xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
  in xchk_inode_extsize():
     69          struct xfs_scrub *sc,
     78          fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags);
  [all …]
D | rtbitmap.c
  in xchk_setup_rt():
     23          struct xfs_scrub *sc)
     27          error = xchk_setup_fs(sc);
     31          sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP;
     32          sc->ip = sc->mp->m_rbmip;
     33          xfs_ilock(sc->ip, sc->ilock_flags);
  in xchk_rtbitmap_rec():
     48          struct xfs_scrub *sc = priv;
     56                  xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
  in xchk_rtbitmap_check_extents():
     63          struct xfs_scrub *sc)
     65          struct xfs_mount *mp = sc->mp;
     72          if (xchk_should_terminate(sc, &error) ||
  [all …]
D | common.h
  in xchk_should_terminate():
     16          struct xfs_scrub *sc,
  file scope:
     34  int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
     35  bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
     37  bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
     40  bool xchk_xref_process_error(struct xfs_scrub *sc,
     42  bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
     45  void xchk_block_set_preen(struct xfs_scrub *sc,
     47  void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
     49  void xchk_set_corrupt(struct xfs_scrub *sc);
     50  void xchk_block_set_corrupt(struct xfs_scrub *sc,
  [all …]
D | repair.c
  in xrep_attempt():
     42          struct xfs_scrub *sc)
     46          trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);
     48          xchk_ag_btcur_free(&sc->sa);
     51          ASSERT(sc->ops->repair);
     52          error = sc->ops->repair(sc);
     53          trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
     60                  sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
     61                  sc->flags |= XREP_ALREADY_FIXED;
     66                  if (!(sc->flags & XCHK_TRY_HARDER)) {
     67                          sc->flags |= XCHK_TRY_HARDER;
  [all …]
D | parent.c
  in xchk_setup_parent():
     23          struct xfs_scrub *sc)
     25          return xchk_setup_inode_contents(sc, 0);
  struct member:
     34          struct xfs_scrub *sc;
  in xchk_parent_actor():
     62          if (xchk_should_terminate(spc->sc, &error))
  in xchk_parent_count_parent_dentries():
     71          struct xfs_scrub *sc,
     77                  .ino = sc->ip->i_ino,
     78                  .sc = sc,
    107          error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
  in xchk_parent_validate():
    130          struct xfs_scrub *sc,
    134          struct xfs_mount *mp = sc->mp;
  [all …]
D | refcount.c
  in xchk_setup_ag_refcountbt():
     25          struct xfs_scrub *sc)
     27          return xchk_setup_ag_btree(sc, false);
  struct member:
     74          struct xfs_scrub *sc;
  in xchk_refcountbt_rmap_check():
    105          if (xchk_should_terminate(refchk->sc, &error))
    113                  xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
  in xchk_refcountbt_xref_rmap():
    271          struct xfs_scrub *sc,
    275                  .sc = sc,
    287          if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
    297          error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
    299          if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
  [all …]
D | quota.c
  in xchk_quota_to_dqtype():
     23          struct xfs_scrub *sc)
     25          switch (sc->sm->sm_type) {
  in xchk_setup_quota():
     40          struct xfs_scrub *sc)
     45          if (!XFS_IS_QUOTA_ON(sc->mp))
     48          dqtype = xchk_quota_to_dqtype(sc);
     52          if (!xfs_this_quota_on(sc->mp, dqtype))
     55          error = xchk_setup_fs(sc);
     58          sc->ip = xfs_quota_inode(sc->mp, dqtype);
     59          xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
     60          sc->ilock_flags = XFS_ILOCK_EXCL;
  [all …]
D | rmap.c
  in xchk_setup_ag_rmapbt():
     25          struct xfs_scrub *sc)
     27          return xchk_setup_ag_btree(sc, false);
  in xchk_rmapbt_xref_refc():
     35          struct xfs_scrub *sc,
     46          if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
     55          error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
     57          if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
     60                  xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
  in xchk_rmapbt_xref():
     66          struct xfs_scrub *sc,
     72          if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
     75          xchk_xref_is_used_space(sc, agbno, len);
  [all …]
/linux-6.1.9/drivers/clk/qcom/
D | gdsc.c
  in gdsc_check_status():
     59  static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
     65          if (sc->flags & POLL_CFG_GDSCR)
     66                  reg = sc->gdscr + CFG_GDSCR_OFFSET;
     67          else if (sc->gds_hw_ctrl)
     68                  reg = sc->gds_hw_ctrl;
     70                  reg = sc->gdscr;
     72          ret = regmap_read(sc->regmap, reg, &val);
     76          if (sc->flags & POLL_CFG_GDSCR) {
  in gdsc_hwctrl():
     95  static int gdsc_hwctrl(struct gdsc *sc, bool en)
     99          return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
  [all …]
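gdsc_check_status() picks a status register and reads it through regmap until the GDSC reflects the requested state. A hedged sketch of the same polling idea using the generic regmap_read_poll_timeout() helper; the register, status bit and the 1 us / 500 us poll parameters below are illustrative values, not taken from the qcom driver:

/* Illustrative regmap status polling, with assumed register and bit. */
#include <linux/regmap.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_PWR_ON_BIT   BIT(31)

static int my_poll_power_state(struct regmap *regmap, unsigned int reg, bool on)
{
        unsigned int val;

        return regmap_read_poll_timeout(regmap, reg, val,
                                        !!(val & MY_PWR_ON_BIT) == on,
                                        1, 500);
}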
/linux-6.1.9/drivers/hid/
D | hid-sony.c
  file scope:
    615  static void sony_set_leds(struct sony_sc *sc);
  in sony_schedule_work():
    617  static inline void sony_schedule_work(struct sony_sc *sc,
    624          spin_lock_irqsave(&sc->lock, flags);
    625          if (!sc->defer_initialization && sc->state_worker_initialized)
    626                  schedule_work(&sc->state_worker);
    627          spin_unlock_irqrestore(&sc->lock, flags);
    630          if (sc->hotplug_worker_initialized)
    631                  schedule_work(&sc->hotplug_worker);
  in ghl_magic_poke_cb():
    638          struct sony_sc *sc = urb->context;
    641                  hid_err(sc->hdev, "URB transfer failed : %d", urb->status);
  [all …]
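sony_schedule_work() only queues its work items while flags protected by the spinlock say the corresponding workers have been set up. A small sketch of that guard pattern with invented names (my_sc, my_schedule_state_work):

/* Sketch of deferring work only while a lock-protected flag allows it. */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_sc {
        spinlock_t lock;
        bool state_worker_initialized;
        struct work_struct state_worker;
};

static void my_schedule_state_work(struct my_sc *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&sc->lock, flags);
        if (sc->state_worker_initialized)
                schedule_work(&sc->state_worker);
        spin_unlock_irqrestore(&sc->lock, flags);
}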
/linux-6.1.9/drivers/infiniband/hw/hfi1/
D | pio.c
  file scope:
     15  static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
  in cr_group_addresses():
    519  static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
    521          u32 gc = group_context(sc->hw_context, sc->group);
    522          u32 index = sc->hw_context & 0x7;
    524          sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
    526                  &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
  in sc_halted():
    535          struct send_context *sc;
    537          sc = container_of(work, struct send_context, halt_work);
    538          sc_restart(sc);
  in sc_mtu_to_threshold():
    551  u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
  [all …]
/linux-6.1.9/drivers/scsi/snic/
D | snic_scsi.c
  file scope:
     22  #define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag)
  in snic_io_lock_hash():
     98  snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
    100          u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);
  in snic_release_req_buf():
    115          struct scsi_cmnd *sc)
    120          SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
    121                        (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
    122                        (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
    123                        (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
    124                        (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
    125                        (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
  [all …]
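snic_io_lock_hash() spreads per-command locking across a small power-of-two array of spinlocks indexed by the SCSI request tag. A self-contained sketch of the same idea with invented names (my_host, my_io_lock_hash); SNIC_IO_LOCKS plays the role of MY_IO_LOCKS in the real driver:

/* Sketch of hashing a request tag into a small array of spinlocks. */
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_IO_LOCKS 64          /* must stay a power of two for the mask */

struct my_host {
        spinlock_t io_req_lock[MY_IO_LOCKS];
};

static spinlock_t *my_io_lock_hash(struct my_host *host, u32 tag)
{
        return &host->io_req_lock[tag & (MY_IO_LOCKS - 1)];
}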
/linux-6.1.9/drivers/usb/atm/
D | ueagle-atm.c
  file scope:
    253  #define IS_OPERATIONAL(sc) \
    254          ((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \
    255          (GET_STATUS(sc->stats.phy.state) == 2) : \
    256          (sc->stats.phy.state == 7))
    547  #define uea_wait(sc, cond, timeo) \
    549          int _r = wait_event_interruptible_timeout(sc->sync_q, \
    558          if (sc->usbatm->atm_dev) \
    559                  sc->usbatm->atm_dev->type = val; \
    564          if (sc->usbatm->atm_dev) \
    565                  atm_dev_signal_change(sc->usbatm->atm_dev, val); \
  [all …]
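uea_wait() wraps wait_event_interruptible_timeout() in a statement-expression macro so callers get a single return value. A hedged sketch of that wrapper shape: my_wait() and the sync_q member are assumptions, and mapping an interrupted sleep to -EINTR is this sketch's choice, not necessarily what the driver does.

/* Sketch of a single-result wrapper around an interruptible timed wait. */
#include <linux/wait.h>
#include <linux/errno.h>

#define my_wait(sc, cond, timeo)                                          \
({                                                                        \
        long _r = wait_event_interruptible_timeout((sc)->sync_q,         \
                                                   (cond), (timeo));     \
        if (_r < 0)     /* interrupted by a signal */                     \
                _r = -EINTR;                                              \
        _r;             /* 0 = timed out, >0 = condition met in time */   \
})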
/linux-6.1.9/drivers/firmware/arm_scmi/
D | scmi_power_control.c
  in scmi_reboot_notifier():
    119          struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
    121          mutex_lock(&sc->state_mtx);
    125                  if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
    126                          sc->state = SCMI_SYSPOWER_REBOOTING;
    129                  if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
    130                      sc->required_transition == SCMI_SYSTEM_WARMRESET)
    131                          sc->state = SCMI_SYSPOWER_REBOOTING;
    137          if (sc->state == SCMI_SYSPOWER_REBOOTING) {
    138                  dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
    139                  cancel_delayed_work_sync(&sc->forceful_work);
  [all …]
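scmi_reboot_notifier() is a reboot-notifier callback that recovers its driver context from the embedded notifier_block and then adjusts its state machine under a mutex. A minimal sketch of the usual plumbing for such a callback, with invented names (my_conf, my_reboot_notifier, my_register_reboot_hook); the state handling itself is only hinted at in a comment:

/* Sketch of reboot-notifier registration and context recovery. */
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/container_of.h>

struct my_conf {
        struct notifier_block reboot_nb;
        /* ... state machine, mutex, delayed work ... */
};

static int my_reboot_notifier(struct notifier_block *nb, unsigned long reason,
                              void *unused)
{
        struct my_conf *conf = container_of(nb, struct my_conf, reboot_nb);

        /* e.g. mark conf as rebooting and cancel any forceful-shutdown work */
        return NOTIFY_OK;
}

static int my_register_reboot_hook(struct my_conf *conf)
{
        conf->reboot_nb.notifier_call = my_reboot_notifier;
        return register_reboot_notifier(&conf->reboot_nb);
}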
/linux-6.1.9/drivers/md/
D | dm-stripe.c
  in trigger_event():
     53          struct stripe_c *sc = container_of(work, struct stripe_c,
     55          dm_table_event(sc->ti->table);
  in get_stripe():
     61  static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
     72                                &sc->stripe[stripe].dev);
     76          sc->stripe[stripe].physical_start = start;
  in stripe_ctr():
     87          struct stripe_c *sc;
    132          sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
    133          if (!sc) {
    139          INIT_WORK(&sc->trigger_event, trigger_event);
    142          sc->ti = ti;
  [all …]
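stripe_ctr() allocates the stripe_c header together with its trailing array of per-stripe entries in one overflow-checked kmalloc() via struct_size(). A sketch of that allocation idiom with invented names (my_stripe, my_stripe_c, my_stripe_alloc):

/* Sketch of a flexible-array allocation using struct_size(). */
#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct my_stripe {
        sector_t physical_start;
};

struct my_stripe_c {
        unsigned int stripes;
        struct my_stripe stripe[];      /* flexible array member */
};

static struct my_stripe_c *my_stripe_alloc(unsigned int stripes)
{
        struct my_stripe_c *sc;

        sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->stripes = stripes;
        return sc;
}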
/linux-6.1.9/fs/ocfs2/cluster/
D | tcp.c
  file scope:
     62  #define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
     63                            &sc->sc_node->nd_ipv4_address, \
     64                            ntohs(sc->sc_node->nd_ipv4_port)
     81  #define sclog(sc, fmt, args...) do { \
     82          typeof(sc) __sc = (sc); \
    128  static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
    129  static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
  in o2net_set_nst_sock_container():
    158          struct o2net_sock_container *sc)
    160          nst->st_sc = sc;
  in o2net_set_sock_timer():
    169  static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
  [all …]
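The sclog() macro binds its argument to a local with typeof() so that sc is evaluated exactly once even though the macro body may use it several times. A stripped-down sketch of that trick; my_sclog() is an invented name and the printk format is purely illustrative:

/* Sketch of the single-evaluation typeof() macro trick. */
#include <linux/printk.h>

#define my_sclog(sc, fmt, args...) do {                                   \
        typeof(sc) __sc = (sc);                                           \
        printk(KERN_DEBUG "[sc %p] " fmt, __sc, ##args);                  \
} while (0)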