Lines matching references to identifier "s" in sound/firewire/amdtp-stream.c, grouped by function. The leading number on each line is the source line of the match; "(s: argument)" or "(s: local)" notes how the identifier is bound in that function.

amdtp_stream_init() (s: argument)
   90  int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
   99  s->protocol = kzalloc(protocol_size, GFP_KERNEL);
  100  if (!s->protocol)
  103  s->unit = unit;
  104  s->direction = dir;
  105  s->flags = flags;
  106  s->context = ERR_PTR(-1);
  107  mutex_init(&s->mutex);
  108  s->packet_index = 0;
  110  init_waitqueue_head(&s->ready_wait);
  112  s->fmt = fmt;
  113  s->process_ctx_payloads = process_ctx_payloads;
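
Note: the ERR_PTR(-1) assignment at line 106 is the stream's "not running" sentinel. s->context holds an encoded error until fw_iso_context_create() later replaces it (line 1698), so the running test can be a plain IS_ERR() check, as the amdtp_stream_running() call at line 129 suggests. A minimal user-space sketch of the same idiom (ERR_PTR()/IS_ERR() are simplified here, and is_running() is a hypothetical stand-in):

    #include <stdio.h>

    /* Simplified versions of the kernel's ERR_PTR()/IS_ERR() helpers. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct stream { void *context; };

    /* Hypothetical stand-in for amdtp_stream_running(). */
    static int is_running(const struct stream *s)
    {
        return !IS_ERR(s->context);
    }

    int main(void)
    {
        struct stream s = { .context = ERR_PTR(-1) };  /* as at line 106 */

        printf("running? %d\n", is_running(&s));  /* 0: sentinel in place */
        s.context = &s;                           /* pretend creation succeeded */
        printf("running? %d\n", is_running(&s));  /* 1 */
        return 0;
    }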

amdtp_stream_destroy() (s: argument)
  123  void amdtp_stream_destroy(struct amdtp_stream *s)
  126  if (s->protocol == NULL)
  129  WARN_ON(amdtp_stream_running(s));
  130  kfree(s->protocol);
  131  mutex_destroy(&s->mutex);

apply_constraint_to_size() (s: local)
  160  struct snd_interval *s = hw_param_interval(params, rule->var);
  172  t.min = roundup(s->min, step);
  173  t.max = rounddown(s->max, step);
  176  return snd_interval_refine(s, &t);
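
Note: lines 172-173 shrink an ALSA interval so that both endpoints land on a multiple of step, after which snd_interval_refine() intersects the result with the existing constraint. A self-contained sketch of the rounding arithmetic (roundup()/rounddown() are reimplemented here for illustration; in the kernel they are macros):

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's roundup()/rounddown() macros. */
    static unsigned int roundup(unsigned int x, unsigned int step)
    {
        return ((x + step - 1) / step) * step;
    }

    static unsigned int rounddown(unsigned int x, unsigned int step)
    {
        return (x / step) * step;
    }

    int main(void)
    {
        /* e.g. a period-size interval of [100, 1000] frames, step = 32 */
        unsigned int min = 100, max = 1000, step = 32;

        printf("refined interval: [%u, %u]\n",
               roundup(min, step), rounddown(max, step));  /* [128, 992] */
        return 0;
    }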

amdtp_stream_add_pcm_hw_constraints() (s: argument)
  184  int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
  217  if (!(s->flags & CIP_NO_HEADER))
  243  if (!(s->flags & CIP_BLOCKING))

amdtp_stream_set_parameters() (s: argument)
  280  int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
  292  s->sfc = sfc;
  293  s->data_block_quadlets = data_block_quadlets;
  294  s->syt_interval = amdtp_syt_intervals[sfc];
  297  s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
  300  if (s->flags & CIP_BLOCKING)
  301  s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
  303  s->pcm_frame_multiplier = pcm_frame_multiplier;
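
Note: lines 297-301 express the presentation delay in 24.576 MHz ticks: a fixed transfer delay minus one isochronous cycle, plus, for blocking transmission, the playback time of one SYT_INTERVAL of events. A worked example at 48 kHz; the constant values mirror the driver's definitions but are stated here as assumptions:

    #include <stdio.h>

    /* Assumed constants, mirroring the driver's definitions. */
    #define TICKS_PER_CYCLE      3072    /* 24.576 MHz over 8000 cycles/s */
    #define TICKS_PER_SECOND     (TICKS_PER_CYCLE * 8000)
    #define TRANSFER_DELAY_TICKS 0x2e00  /* about 479 us */

    int main(void)
    {
        unsigned int rate = 48000;
        unsigned int syt_interval = 8;  /* amdtp_syt_intervals[] entry for 48 kHz */
        unsigned int delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

        /* CIP_BLOCKING adds the duration of one SYT_INTERVAL of events. */
        delay += (unsigned int)((unsigned long long)TICKS_PER_SECOND *
                                syt_interval / rate);

        printf("transfer_delay = %u ticks (~%llu us)\n", delay,
               (unsigned long long)delay * 1000000 / TICKS_PER_SECOND);
        return 0;
    }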

amdtp_stream_get_max_ctx_payload_size() (s: argument)
  310  static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
  314  if (s->flags & CIP_JUMBO_PAYLOAD)
  319  return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;

amdtp_stream_get_max_payload() (s: argument)
  329  unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
  333  if (!(s->flags & CIP_NO_HEADER))
  338  return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
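
Note: putting lines 319 and 338 together, the maximum payload is one SYT_INTERVAL worth of data blocks, each data_block_quadlets quadlets of four bytes, scaled by a multiplier when CIP_JUMBO_PAYLOAD is set, plus the two-quadlet CIP header unless CIP_NO_HEADER applies. A worked example (the concrete numbers are illustrative, not taken from a particular device):

    #include <stdio.h>

    int main(void)
    {
        /* Example: 48 kHz stream carrying 10 quadlets per data block. */
        unsigned int syt_interval = 8;
        unsigned int data_block_quadlets = 10;
        unsigned int multiplier = 1;           /* > 1 only with CIP_JUMBO_PAYLOAD */
        unsigned int cip_header_size = 2 * 4;  /* 0 with CIP_NO_HEADER */

        unsigned int max_ctx_payload =
            syt_interval * data_block_quadlets * 4 * multiplier;

        printf("max payload = %u bytes\n", cip_header_size + max_ctx_payload);
        /* 8 + 8 * 10 * 4 = 328 bytes */
        return 0;
    }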

amdtp_stream_pcm_prepare() (s: argument)
  348  void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
  350  s->pcm_buffer_pointer = 0;
  351  s->pcm_period_pointer = 0;

prev_packet_desc (s: macro argument)
  355  #define prev_packet_desc(s, desc) \
  356  list_prev_entry_circular(desc, &s->packet_descs_list, link)

pool_blocking_data_blocks() (s: argument)
  358  static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
  361  const unsigned int syt_interval = s->syt_interval;

pool_ideal_nonblocking_data_blocks() (s: argument)
  376  static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
  380  const enum cip_sfc sfc = s->sfc;
  381  unsigned int state = s->ctx_data.rx.data_block_state;
  415  s->ctx_data.rx.data_block_state = state;
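
Note: in non-blocking transmission the per-cycle data block count must average rate/8000 events per 125 us cycle, so non-integer ratios (44.1 kHz gives 5.5125) are met by alternating counts, with running state carried between calls in data_block_state (lines 381 and 415). The sketch below illustrates the distribution principle with a simple remainder accumulator; it is not the driver's exact state machine:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int rate = 44100, cycles_per_sec = 8000;
        unsigned int phase = 0;  /* running remainder, akin to data_block_state */

        for (unsigned int cycle = 0; cycle < 16; cycle++) {
            phase += rate;
            unsigned int data_blocks = phase / cycles_per_sec;  /* 5 or 6 */
            phase %= cycles_per_sec;

            printf("cycle %2u: %u data blocks\n", cycle, data_blocks);
        }
        /* Over 8000 cycles the counts sum to exactly 44100 events. */
        return 0;
    }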

pool_ideal_syt_offsets() (s: argument)
  457  static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
  460  const enum cip_sfc sfc = s->sfc;
  461  unsigned int last = s->ctx_data.rx.last_syt_offset;
  462  unsigned int state = s->ctx_data.rx.syt_offset_state;
  473  s->ctx_data.rx.last_syt_offset = last;
  474  s->ctx_data.rx.syt_offset_state = state;

calculate_cached_cycle_count() (s: argument)
  502  static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
  504  const unsigned int cache_size = s->ctx_data.tx.cache.size;
  505  unsigned int cycles = s->ctx_data.tx.cache.pos;

cache_seq() (s: argument)
  514  static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
  516  const unsigned int transfer_delay = s->transfer_delay;
  517  const unsigned int cache_size = s->ctx_data.tx.cache.size;
  518  struct seq_desc *cache = s->ctx_data.tx.cache.descs;
  519  unsigned int cache_pos = s->ctx_data.tx.cache.pos;
  520  bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
  533  src = amdtp_stream_next_packet_desc(s, src);
  536  s->ctx_data.tx.cache.pos = cache_pos;

pool_ideal_seq_descs() (s: argument)
  539  static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  542  pool_ideal_syt_offsets(s, descs, size, pos, count);
  544  if (s->flags & CIP_BLOCKING)
  545  pool_blocking_data_blocks(s, descs, size, pos, count);
  547  pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);

pool_replayed_seq() (s: argument)
  550  static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  553  struct amdtp_stream *target = s->ctx_data.rx.replay_target;
  556  unsigned int cache_pos = s->ctx_data.rx.cache_pos;
  565  s->ctx_data.rx.cache_pos = cache_pos;

pool_seq_descs() (s: argument)
  568  static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  571  struct amdtp_domain *d = s->domain;
  572  void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  575  if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
  581  struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
  583  const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
  593  pool_seq_descs(s, descs, size, pos, count);

update_pcm_pointers() (s: argument)
  596  static void update_pcm_pointers(struct amdtp_stream *s,
  602  ptr = s->pcm_buffer_pointer + frames;
  605  WRITE_ONCE(s->pcm_buffer_pointer, ptr);
  607  s->pcm_period_pointer += frames;
  608  if (s->pcm_period_pointer >= pcm->runtime->period_size) {
  609  s->pcm_period_pointer -= pcm->runtime->period_size;
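
Note: lines 602-609 are the standard ALSA bookkeeping: the buffer pointer advances modulo the buffer size (the wrap happens between lines 602 and 605 on a line that does not reference s, so it is absent from this listing) while a separate period counter decides when a period has completed. A self-contained model of the arithmetic (buffer_size, period_size and the period_elapsed() stub are illustrative, not the driver's):

    #include <stdio.h>

    static void period_elapsed(void)  /* stands in for snd_pcm_period_elapsed() */
    {
        printf("  -> period elapsed\n");
    }

    int main(void)
    {
        const unsigned int buffer_size = 1024, period_size = 256;
        unsigned int buffer_ptr = 0, period_ptr = 0;

        for (int i = 0; i < 6; i++) {
            unsigned int frames = 200;  /* frames handled per completion */

            buffer_ptr += frames;
            if (buffer_ptr >= buffer_size)  /* wrap inside the ring */
                buffer_ptr -= buffer_size;

            period_ptr += frames;
            if (period_ptr >= period_size) {
                period_ptr -= period_size;
                period_elapsed();
            }
            printf("buffer_ptr=%4u period_ptr=%3u\n", buffer_ptr, period_ptr);
        }
        return 0;
    }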

queue_packet() (s: argument)
  627  static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
  633  params->tag = s->tag;
  636  err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
  637  s->buffer.packets[s->packet_index].offset);
  639  dev_err(&s->unit->device, "queueing error: %d\n", err);
  643  if (++s->packet_index >= s->queue_size)
  644  s->packet_index = 0;

queue_out_packet() (s: argument)
  649  static inline int queue_out_packet(struct amdtp_stream *s,
  654  return queue_packet(s, params, sched_irq);

queue_in_packet() (s: argument)
  657  static inline int queue_in_packet(struct amdtp_stream *s,
  661  params->header_length = s->ctx_data.tx.ctx_header_size;
  662  params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
  664  return queue_packet(s, params, false);

generate_cip_header() (s: argument)
  667  static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
  670  cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
  671  (s->data_block_quadlets << CIP_DBS_SHIFT) |
  672  ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
  675  ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
  676  ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
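
Note: lines 670-676 pack the two-quadlet IEC 61883-1 CIP header: quadlet 0 carries the source ID, data block size (DBS), SPH flag and data block counter, while quadlet 1 (lines 675-676) carries EOH, the format field (FMT), the format dependent field (FDF) and the SYT timestamp. A self-contained sketch of the packing; the shift and mask values follow the IEC 61883-1 layout and are stated as assumptions, since the listing does not show the driver's CIP_* definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed IEC 61883-1 field positions (not taken from the listing). */
    #define CIP_SID_SHIFT 24
    #define CIP_DBS_SHIFT 16
    #define CIP_SPH_SHIFT 10
    #define CIP_DBC_MASK  0x000000ffu
    #define CIP_EOH       0x80000000u
    #define CIP_FMT_SHIFT 24
    #define CIP_FDF_SHIFT 16
    #define CIP_SYT_MASK  0x0000ffffu

    int main(void)
    {
        uint32_t sid = 2, dbs = 10, sph = 0, dbc = 0x40;
        uint32_t fmt = 0x10 /* AM824 */, fdf = 0x02, syt = 0x1234;

        uint32_t q0 = (sid << CIP_SID_SHIFT) | (dbs << CIP_DBS_SHIFT) |
                      (sph << CIP_SPH_SHIFT) | (dbc & CIP_DBC_MASK);
        uint32_t q1 = CIP_EOH | (fmt << CIP_FMT_SHIFT) |
                      (fdf << CIP_FDF_SHIFT) | (syt & CIP_SYT_MASK);

        printf("cip_header = %08x %08x\n", q0, q1);  /* 020a0040 90021234 */
        return 0;
    }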

build_it_pkt_header() (s: argument)
  680  static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
  689  payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
  694  generate_cip_header(s, cip_header, data_block_counter, syt);
  700  trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
  701  data_block_counter, s->packet_index, index, curr_cycle_time);

check_cip_header() (s: argument)
  704  static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
  725  (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
  726  dev_info_ratelimited(&s->unit->device,
  735  if (sph != s->sph || fmt != s->fmt) {
  736  dev_info_ratelimited(&s->unit->device,
  751  dev_err(&s->unit->device,
  756  if (s->flags & CIP_WRONG_DBS)
  757  data_block_quadlets = s->data_block_quadlets;
  764  if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
  768  if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
  771  } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
  776  if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
  777  dbc_interval = s->ctx_data.tx.dbc_interval;
  785  dev_err(&s->unit->device,
  793  if (!(s->flags & CIP_UNAWARE_SYT))
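
Note: the DBC logic around lines 768-777 enforces continuity of the 8-bit data block counter: each packet's DBC must equal the previous DBC plus the number of data blocks carried (or a device-specific dbc_interval), modulo 256, and the CIP_* quirk flags relax the check for devices that break the rule. A self-contained sketch of the core continuity test (the helper name is illustrative):

    #include <stdio.h>

    /* Illustrative check: does the received DBC continue the sequence? */
    static int dbc_is_continuous(unsigned int prev_dbc, unsigned int dbc,
                                 unsigned int dbc_interval)
    {
        return dbc == ((prev_dbc + dbc_interval) & 0xff);
    }

    int main(void)
    {
        /* Previous DBC was 0xfc and the packet carried 8 data blocks. */
        printf("%d\n", dbc_is_continuous(0xfc, 0x04, 8));  /* 1: wraps mod 256 */
        printf("%d\n", dbc_is_continuous(0xfc, 0x05, 8));  /* 0: discontinuity */
        return 0;
    }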

parse_ir_ctx_header() (s: argument)
  799  static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
  812  if (!(s->flags & CIP_NO_HEADER))
  817  if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
  818  dev_err(&s->unit->device,
  820  payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
  829  err = check_cip_header(s, cip_header, payload_length - cip_header_size,
  841  *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
  848  trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,

generate_tx_packet_descs() (s: argument)
  905  static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
  909  unsigned int next_cycle = s->next_cycle;
  910  unsigned int dbc = s->data_block_counter;
  911  unsigned int packet_index = s->packet_index;
  912  unsigned int queue_size = s->queue_size;
  918  (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
  930  if (s->flags & CIP_NO_HEADER) {
  945  desc = amdtp_stream_next_packet_desc(s, desc);
  948  } else if (s->flags & CIP_JUMBO_PAYLOAD) {
  957  dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
  963  err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
  972  desc->ctx_payload = s->buffer.packets[packet_index].buffer;
  974  if (!(s->flags & CIP_DBC_IS_END_EVENT))
  978  desc = amdtp_stream_next_packet_desc(s, desc);
  980  ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
  984  s->next_cycle = next_cycle;
  985  s->data_block_counter = dbc;

generate_rx_packet_descs() (s: argument)
 1001  static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
 1004  struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
 1005  unsigned int seq_size = s->ctx_data.rx.seq.size;
 1006  unsigned int seq_pos = s->ctx_data.rx.seq.pos;
 1007  unsigned int dbc = s->data_block_counter;
 1008  bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
 1011  pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);
 1014  unsigned int index = (s->packet_index + i) % s->queue_size;
 1017  desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
 1020  desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
 1026  if (s->flags & CIP_DBC_IS_END_EVENT)
 1031  if (!(s->flags & CIP_DBC_IS_END_EVENT))
 1034  desc->ctx_payload = s->buffer.packets[index].buffer;
 1037  desc = amdtp_stream_next_packet_desc(s, desc);
 1042  s->data_block_counter = dbc;
 1043  s->ctx_data.rx.seq.pos = seq_pos;

cancel_stream() (s: argument)
 1046  static inline void cancel_stream(struct amdtp_stream *s)
 1048  s->packet_index = -1;
 1050  amdtp_stream_pcm_abort(s);
 1051  WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);

compute_pcm_extra_delay() (s: argument)
 1054  static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
 1069  desc = amdtp_stream_next_packet_desc(s, desc);
 1072  err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
 1080  if (s->direction == AMDTP_IN_STREAM) {
 1091  desc = amdtp_stream_next_packet_desc(s, desc);
 1104  desc = prev_packet_desc(s, desc);
 1108  return data_block_count * s->pcm_frame_multiplier;

process_ctx_payloads() (s: argument)
 1111  static void process_ctx_payloads(struct amdtp_stream *s,
 1118  pcm = READ_ONCE(s->pcm);
 1119  s->process_ctx_payloads(s, desc, count, pcm);
 1124  pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);
 1128  desc = amdtp_stream_next_packet_desc(s, desc);
 1131  update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);

process_rx_packets() (s: local)
 1138  struct amdtp_stream *s = private_data;
 1139  const struct amdtp_domain *d = s->domain;
 1142  unsigned int event_count = s->ctx_data.rx.event_count;
 1143  struct pkt_desc *desc = s->packet_descs_cursor;
 1150  if (s->packet_index < 0)
 1156  generate_rx_packet_descs(s, desc, ctx_header, packets);
 1158  process_ctx_payloads(s, desc, packets);
 1160  if (!(s->flags & CIP_NO_HEADER))
 1165  if (s == d->irq_target) {
 1169  struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
 1176  (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
 1185  build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
 1189  if (s == s->domain->irq_target) {
 1197  if (queue_out_packet(s, &template.params, sched_irq) < 0) {
 1198  cancel_stream(s);
 1202  desc = amdtp_stream_next_packet_desc(s, desc);
 1205  s->ctx_data.rx.event_count = event_count;
 1206  s->packet_descs_cursor = desc;

skip_rx_packets() (s: local)
 1212  struct amdtp_stream *s = private_data;
 1213  struct amdtp_domain *d = s->domain;
 1219  if (s->packet_index < 0)
 1224  cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
 1225  s->next_cycle = increment_ohci_cycle_count(cycle, 1);
 1232  bool sched_irq = (s == d->irq_target && i == packets - 1);
 1234  if (queue_out_packet(s, &params, sched_irq) < 0) {
 1235  cancel_stream(s);

process_rx_packets_intermediately() (s: local)
 1247  struct amdtp_stream *s = private_data;
 1248  struct amdtp_domain *d = s->domain;
 1250  const unsigned int queue_size = s->queue_size;
 1254  if (s->packet_index < 0)
 1273  if (amdtp_streaming_error(s))
 1281  s->ready_processing = true;
 1282  wake_up(&s->ready_wait);
 1285  s->ctx_data.rx.cache_pos = 0;
 1288  if (amdtp_streaming_error(s))
 1291  if (s == d->irq_target)
 1292  s->context->callback.sc = irq_target_callback;
 1294  s->context->callback.sc = process_rx_packets;
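
Note: lines 1291-1294 show the driver's staged state machine: once the stream is ready, the isochronous context's completion callback pointer is rewritten so that subsequent batches of completions go to the steady-state handler instead of the intermediate one. A self-contained sketch of that callback-swapping pattern (the context and callback types here are illustrative, not the firewire core's):

    #include <stdio.h>

    struct ctx;
    typedef void (*callback_t)(struct ctx *c);

    struct ctx {
        callback_t callback;  /* like context->callback.sc */
    };

    static void steady_state(struct ctx *c)
    {
        (void)c;
        printf("steady-state processing\n");
    }

    static void intermediate(struct ctx *c)
    {
        printf("intermediate processing, switching handler\n");
        c->callback = steady_state;  /* swap for subsequent completions */
    }

    int main(void)
    {
        struct ctx c = { .callback = intermediate };

        for (int i = 0; i < 3; i++)
            c.callback(&c);  /* intermediate once, then steady-state */
        return 0;
    }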

process_tx_packets() (s: local)
 1301  struct amdtp_stream *s = private_data;
 1303  struct pkt_desc *desc = s->packet_descs_cursor;
 1309  if (s->packet_index < 0)
 1313  packet_count = header_length / s->ctx_data.tx.ctx_header_size;
 1316  err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
 1319  cancel_stream(s);
 1323  struct amdtp_domain *d = s->domain;
 1325  process_ctx_payloads(s, desc, desc_count);
 1328  cache_seq(s, desc, desc_count);
 1331  desc = amdtp_stream_next_packet_desc(s, desc);
 1332  s->packet_descs_cursor = desc;
 1338  if (queue_in_packet(s, &params) < 0) {
 1339  cancel_stream(s);

drop_tx_packets() (s: local)
 1348  struct amdtp_stream *s = private_data;
 1354  if (s->packet_index < 0)
 1357  packets = header_length / s->ctx_data.tx.ctx_header_size;
 1359  ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
 1361  s->next_cycle = increment_ohci_cycle_count(cycle, 1);
 1366  if (queue_in_packet(s, &params) < 0) {
 1367  cancel_stream(s);

process_tx_packets_intermediately() (s: local)
 1376  struct amdtp_stream *s = private_data;
 1377  struct amdtp_domain *d = s->domain;
 1382  if (s->packet_index < 0)
 1385  packets = header_length / s->ctx_data.tx.ctx_header_size;
 1395  ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
 1402  size_t length = s->ctx_data.tx.ctx_header_size * offset;
 1404  drop_tx_packets(context, tstamp, length, ctx_header, s);
 1405  if (amdtp_streaming_error(s))
 1413  s->ready_processing = true;
 1414  wake_up(&s->ready_wait);
 1416  process_tx_packets(context, tstamp, header_length, ctx_header, s);
 1417  if (amdtp_streaming_error(s))

drop_tx_packets_initially() (s: local)
 1427  struct amdtp_stream *s = private_data;
 1428  struct amdtp_domain *d = s->domain;
 1434  if (s->packet_index < 0)
 1437  count = header_length / s->ctx_data.tx.ctx_header_size;
 1447  if (s->flags & CIP_NO_HEADER) {
 1448  data_blocks = payload_quads / s->data_block_quadlets;
 1457  if (s->flags & CIP_UNAWARE_SYT) {
 1458  data_blocks = payload_quads / s->data_block_quadlets;
 1467  data_blocks = payload_quads / s->data_block_quadlets;
 1474  ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
 1477  drop_tx_packets(context, tstamp, header_length, header, s);
 1480  s->ctx_data.tx.event_starts = true;
 1488  list_for_each_entry(s, &d->streams, list) {
 1489  if (s->direction == AMDTP_IN_STREAM) {
 1491  if (s->ctx_data.tx.event_starts)
 1499  list_for_each_entry(s, &d->streams, list) {
 1500  if (s->direction != AMDTP_IN_STREAM)
 1503  next_cycle = increment_ohci_cycle_count(s->next_cycle,
 1509  s->context->callback.sc = process_tx_packets_intermediately;

process_ctxs_in_domain() (s: local)
 1519  struct amdtp_stream *s;
 1521  list_for_each_entry(s, &d->streams, list) {
 1522  if (s != d->irq_target && amdtp_stream_running(s))
 1523  fw_iso_context_flush_completions(s->context);
 1525  if (amdtp_streaming_error(s))
 1534  list_for_each_entry(s, &d->streams, list) {
 1535  if (amdtp_stream_running(s))
 1536  cancel_stream(s);

irq_target_callback() (s: local)
 1543  struct amdtp_stream *s = private_data;
 1544  struct amdtp_domain *d = s->domain;

irq_target_callback_intermediately() (s: local)
 1553  struct amdtp_stream *s = private_data;
 1554  struct amdtp_domain *d = s->domain;

irq_target_callback_skip() (s: local)
 1563  struct amdtp_stream *s = private_data;
 1564  struct amdtp_domain *d = s->domain;
 1597  unsigned int cycle = s->next_cycle;
 1598  list_for_each_entry(s, &d->streams, list) {
 1599  if (s->direction != AMDTP_OUT_STREAM)
 1602  if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
 1603  cycle = s->next_cycle;
 1605  if (s == d->irq_target)
 1606  s->context->callback.sc = irq_target_callback_intermediately;
 1608  s->context->callback.sc = process_rx_packets_intermediately;

amdtp_stream_first_callback() (s: local)
 1621  struct amdtp_stream *s = private_data;
 1622  struct amdtp_domain *d = s->domain;
 1624  if (s->direction == AMDTP_IN_STREAM) {
 1627  if (s == d->irq_target)
 1633  context->callback.sc(context, tstamp, header_length, header, s);

amdtp_stream_start() (s: argument)
 1648  static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 1651  bool is_irq_target = (s == s->domain->irq_target);
 1658  mutex_lock(&s->mutex);
 1660  if (WARN_ON(amdtp_stream_running(s) ||
 1661  (s->data_block_quadlets < 1))) {
 1666  if (s->direction == AMDTP_IN_STREAM) {
 1673  s->data_block_counter = UINT_MAX;
 1675  s->data_block_counter = 0;
 1679  if (s->direction == AMDTP_IN_STREAM) {
 1682  if (!(s->flags & CIP_NO_HEADER))
 1691  max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
 1693  err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
 1696  s->queue_size = queue_size;
 1698  s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
 1700  amdtp_stream_first_callback, s);
 1701  if (IS_ERR(s->context)) {
 1702  err = PTR_ERR(s->context);
 1704  dev_err(&s->unit->device,
 1709  amdtp_stream_update(s);
 1711  if (s->direction == AMDTP_IN_STREAM) {
 1712  s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
 1713  s->ctx_data.tx.ctx_header_size = ctx_header_size;
 1714  s->ctx_data.tx.event_starts = false;
 1716  if (s->domain->replay.enable) {
 1719  s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
 1721  s->ctx_data.tx.cache.pos = 0;
 1722  s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
 1723  sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
 1724  if (!s->ctx_data.tx.cache.descs) {
 1743  s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
 1744  if (!s->ctx_data.rx.seq.descs) {
 1748  s->ctx_data.rx.seq.size = queue_size;
 1749  s->ctx_data.rx.seq.pos = 0;
 1751  entry = &initial_state[s->sfc];
 1752  s->ctx_data.rx.data_block_state = entry->data_block;
 1753  s->ctx_data.rx.syt_offset_state = entry->syt_offset;
 1754  s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
 1756  s->ctx_data.rx.event_count = 0;
 1759  if (s->flags & CIP_NO_HEADER)
 1760  s->tag = TAG_NO_CIP_HEADER;
 1762  s->tag = TAG_CIP;
 1768  descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
 1773  s->packet_descs = descs;
 1775  INIT_LIST_HEAD(&s->packet_descs_list);
 1776  for (i = 0; i < s->queue_size; ++i) {
 1778  list_add_tail(&descs->link, &s->packet_descs_list);
 1781  s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
 1783  s->packet_index = 0;
 1787  if (s->direction == AMDTP_IN_STREAM) {
 1788  err = queue_in_packet(s, &params);
 1796  sched_irq = !((s->packet_index + 1) %
 1800  err = queue_out_packet(s, &params, sched_irq);
 1804  } while (s->packet_index > 0);
 1808  if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
 1811  s->ready_processing = false;
 1812  err = fw_iso_context_start(s->context, -1, 0, tag);
 1816  mutex_unlock(&s->mutex);
 1820  kfree(s->packet_descs);
 1821  s->packet_descs = NULL;
 1823  if (s->direction == AMDTP_OUT_STREAM) {
 1824  kfree(s->ctx_data.rx.seq.descs);
 1826  if (s->domain->replay.enable)
 1827  kfree(s->ctx_data.tx.cache.descs);
 1829  fw_iso_context_destroy(s->context);
 1830  s->context = ERR_PTR(-1);
 1832  iso_packets_buffer_destroy(&s->buffer, s->unit);
 1834  mutex_unlock(&s->mutex);
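
Note: the do/while loop ending at line 1804 pre-queues one packet per ring slot: packet_index increments and wraps inside queue_packet() (lines 643-644), so looping while s->packet_index > 0 fills the whole ring exactly once and leaves the index back at slot 0 before fw_iso_context_start() runs at line 1812. A self-contained model of that fill-until-wrap idiom:

    #include <stdio.h>

    #define QUEUE_SIZE 8

    static unsigned int packet_index;

    static void queue_one(void)  /* stands in for queue_packet() */
    {
        printf("queued slot %u\n", packet_index);
        if (++packet_index >= QUEUE_SIZE)  /* wrap, as at lines 643-644 */
            packet_index = 0;
    }

    int main(void)
    {
        do {
            queue_one();
        } while (packet_index > 0);  /* stop when the index wraps to 0 */

        printf("ring full, index back at %u\n", packet_index);
        return 0;
    }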

amdtp_domain_stream_pcm_pointer() (s: argument)
 1847  struct amdtp_stream *s)
 1859  return READ_ONCE(s->pcm_buffer_pointer);

amdtp_domain_stream_pcm_ack() (s: argument)
 1870  int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)

amdtp_stream_update() (s: argument)
 1887  void amdtp_stream_update(struct amdtp_stream *s)
 1890  WRITE_ONCE(s->source_node_id_field,
 1891  (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);

amdtp_stream_stop() (s: argument)
 1902  static void amdtp_stream_stop(struct amdtp_stream *s)
 1904  mutex_lock(&s->mutex);
 1906  if (!amdtp_stream_running(s)) {
 1907  mutex_unlock(&s->mutex);
 1911  fw_iso_context_stop(s->context);
 1912  fw_iso_context_destroy(s->context);
 1913  s->context = ERR_PTR(-1);
 1914  iso_packets_buffer_destroy(&s->buffer, s->unit);
 1915  kfree(s->packet_descs);
 1916  s->packet_descs = NULL;
 1918  if (s->direction == AMDTP_OUT_STREAM) {
 1919  kfree(s->ctx_data.rx.seq.descs);
 1921  if (s->domain->replay.enable)
 1922  kfree(s->ctx_data.tx.cache.descs);
 1925  mutex_unlock(&s->mutex);

amdtp_stream_pcm_abort() (s: argument)
 1935  void amdtp_stream_pcm_abort(struct amdtp_stream *s)
 1939  pcm = READ_ONCE(s->pcm);

amdtp_domain_add_stream() (s: argument)
 1977  int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
 1983  if (s == tmp)
 1987  list_add(&s->list, &d->streams);
 1989  s->channel = channel;
 1990  s->speed = speed;
 1991  s->domain = d;

make_association() (s: local)
 2009  struct amdtp_stream *s;
 2011  list_for_each_entry(s, &d->streams, list) {
 2012  if (s->direction == AMDTP_IN_STREAM) {
 2014  tx = s;
 2023  list_for_each_entry(s, &d->streams, list) {
 2024  if (s->direction == AMDTP_IN_STREAM) {
 2025  tx = s;

amdtp_domain_start() (s: local)
 2059  struct amdtp_stream *s;
 2072  list_for_each_entry(s, &d->streams, list) {
 2073  if (s->direction == AMDTP_OUT_STREAM) {
 2080  d->irq_target = s;
 2095  list_for_each_entry(s, &d->streams, list) {
 2098  if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
 2104  err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
 2111  list_for_each_entry(s, &d->streams, list)
 2112  amdtp_stream_stop(s);

amdtp_domain_stop() (s: local)
 2123  struct amdtp_stream *s, *next;
 2128  list_for_each_entry_safe(s, next, &d->streams, list) {
 2129  list_del(&s->list);
 2131  if (s != d->irq_target)
 2132  amdtp_stream_stop(s);
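
Note: amdtp_domain_stop() deletes each stream from the domain's list while iterating, which is why line 2128 uses list_for_each_entry_safe(): the next cursor is fetched before list_del() tears down the current entry's links. A self-contained illustration of why the _safe variant matters (a plain singly linked list stands in for the kernel's list_head; names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    int main(void)
    {
        /* Build a small list: 0 -> 1 -> 2. */
        struct node *head = NULL;
        for (int i = 2; i >= 0; i--) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }

        /* Safe traversal: grab "next" before freeing the current node,
         * the same guarantee list_for_each_entry_safe() provides. */
        struct node *n, *next;
        for (n = head; n; n = next) {
            next = n->next;  /* saved cursor */
            printf("stopping stream %d\n", n->id);
            free(n);         /* like list_del() plus teardown */
        }
        return 0;
    }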