#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
import json
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Track whether an events table is currently being defined and needs closing.
_close_table = False
# Events to write out when the table is closed
_pending_events = []
# Global BigCString shared by all structures.
_bcs = None
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'metric_constraint', 'metric_expr', 'long_desc'
]


def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s
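
# A quick illustration of removesuffix() (hypothetical file names):
#
#   >>> removesuffix('skylake.json', '.json')
#   'skylake'
#   >>> removesuffix('readme.txt', '.json')
#   'readme.txt'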


def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = 'pme'
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
  return tblname.replace('-', '_')
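
# Illustrative example (hypothetical directory names, not from a real tree):
#
#   >>> file_name_to_table_name(['arm64', 'arm'], 'cortex-a53')
#   'pme_arm64_arm_cortex_a53'
#
# Dashes are replaced so the result is a valid C identifier.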

def c_len(s: str) -> int:
  """Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (i.e. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
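
# Some illustrative values (assuming these exact inputs):
#
#   >>> c_len('cycles')        # no escapes: plain byte length
#   6
#   >>> c_len('foo\\nbar')     # the two characters '\n' become one C character
#   7
#   >>> c_len('cycles\\000')   # a '\000' terminator counts as a single NUL
#   7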

class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, i.e. let one string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into; in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings; we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

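# Illustrative use of the folding (hypothetical strings):
#
#   >>> bcs = BigCString()
#   >>> bcs.add('cycles')
#   >>> bcs.add('uncore_cycles')
#   >>> bcs.compute()
#   >>> bcs.offsets
#   {'uncore_cycles': 0, 'cycles': 7}
#
# 'cycles' is a suffix of 'uncore_cycles', so only the longer string is
# emitted and 'cycles' is addressed at offset 7 within it.
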
_bcs = BigCString()

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> Optional[str]:
      """Fix formatting issues for the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '.  '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or the first in a list, to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well-known event names to an event string, otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to a Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metric_constraint = jd.get('MetricConstraint')
    self.metric_expr = jd.get('MetricExpr')
    if self.metric_expr:
      self.metric_expr = self.metric_expr.replace('\\', '\\\\')
    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

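  # Illustrative example (hypothetical JSON fragment, not from a real file):
  # a dictionary such as
  #   {'EventName': 'L1D.REPLACEMENT', 'EventCode': '0x51',
  #    'UMask': '0x1', 'SampleAfterValue': '2000003'}
  # yields self.name = 'l1d.replacement' and
  # self.event = 'event=0x51,period=2000003,umask=0x1'; fields are appended
  # in the order listed in event_fields, not the order they appear in JSON.
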
  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self) -> str:
    s = ''
    for attr in _json_event_attributes:
      x = getattr(self, attr)
      s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string()
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
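
  # Illustrative output (the offset value is arbitrary): build_c_string()
  # returns the attributes in _json_event_attributes order, each terminated
  # by '\000' (an empty attribute contributes just the terminator), e.g.
  # 'l1d.replacement\000' + '\000' * 5 + 'event=0x51,...\000' + ...; the
  # table entry emitted by to_c_string() is then just
  #   { 8927 }, /* l1d.replacement\000...\000event=0x51,... */
  # i.e. only an offset into big_c_string is stored per event.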


def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""

  try:
    result = json.load(open(path), object_hook=JsonEvent)
  except BaseException:
    print(f"Exception processing {path}")
    raise
  for event in result:
    event.topic = topic
  return result
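
# Illustrative example (hypothetical file contents):
#
#   [
#     {"EventName": "CYCLES", "EventCode": "0x3c"},
#     {"EventName": "INSTRUCTIONS", "EventCode": "0xc0"}
#   ]
#
# json.load() passes every parsed JSON object to JsonEvent via object_hook,
# so a file like the above parses directly into a list of two JsonEvent
# instances, which then have their topic set.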

def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event


def print_events_table_prefix(tblname: str) -> None:
  """Called when a new events table is started."""
  global _close_table
  if _close_table:
    raise IOError('Printing table prefix but last table has no suffix')
  _args.output_file.write(f'static const struct compact_pmu_event {tblname}[] = {{\n')
  _close_table = True


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to the _pending_events table."""
  if not _close_table:
    raise IOError('Table entries missing prefix')
  for e in read_json_events(item.path, topic):
    _pending_events.append(e)


def print_events_table_suffix() -> None:
  """Optionally close the events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
            fix_none(j.metric_name))

  global _close_table
  if not _close_table:
    return

  global _pending_events
  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string())
  _pending_events = []

  _args.output_file.write('};\n\n')
  _close_table = False

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')
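
# Examples of topic derivation (hypothetical file names):
#
#   >>> get_topic('cache.json')
#   'cache'
#   >>> get_topic('floating-point.json')
#   'floating point'
#   >>> get_topic('somechip-metrics.json')
#   'metrics'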

def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    _bcs.add(event.build_c_string())

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  global _sys_event_tables

  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir():
        return False
    return True

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    print_events_table_suffix()

    tblname = file_name_to_table_name(parents, item.name)
    if item.name == 'sys':
      _sys_event_tables.append(tblname)
    print_events_table_prefix(tblname)
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.table = {
\t.entries = pme_test_soc_cpu,
\t.length = ARRAY_SIZE(pme_test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            tblname = file_name_to_table_name([], row[2].replace('/', '_'))
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.table = {{
\t\t.entries = {tblname},
\t\t.length = ARRAY_SIZE({tblname})
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.table = { 0, 0 }
\t},
};

static void decompress(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f"""
\tpe->{attr} = (*p == '\\0' ? NULL : p);
""")
    if attr == _json_event_attributes[-1]:
      continue
    _args.output_file.write('\twhile (*p++);')
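  # The emitted decompress() ends up looking roughly like this (illustrative,
  # abbreviated to the first two attributes):
  #
  #   static void decompress(int offset, struct pmu_event *pe)
  #   {
  #           const char *p = &big_c_string[offset];
  #           pe->name = (*p == '\0' ? NULL : p);
  #           while (*p++);
  #           pe->pmu = (*p == '\0' ? NULL : p);
  #           ...
  #   }
  #
  # Each field points directly into big_c_string, empty fields decode to
  # NULL, and the final attribute needs no trailing "while (*p++);" advance.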
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress(table->entries[i].offset, &pe);
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* On some platforms that use a cpus map, cpuid can be NULL for
         * PMUs other than core PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")


def main() -> None:
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in os.scandir(path):
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)
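
  # Illustrative traversal (hypothetical tree): if 'arch/x86' contains
  # mapfile.csv and a skylake/ directory holding cache.json, then
  # ftw('arch/x86', [], fn) calls fn([], <mapfile.csv>) and fn([], <skylake>)
  # for the top-level entries and fn(['skylake'], <cache.json>) when it
  # recurses (os.scandir() does not guarantee any particular order).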

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
  int offset;
};

""")
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_events_table_suffix()

  print_mapping_table(archs)
  print_system_mapping_table()


if __name__ == '__main__':
  main()
